//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
|
2006-01-16 21:21:29 +00:00
|
|
|
#include "X86InstrBuilder.h"
|
2005-11-15 00:40:23 +00:00
|
|
|
#include "X86ISelLowering.h"
|
2006-06-06 23:30:24 +00:00
|
|
|
#include "X86MachineFunctionInfo.h"
|
2005-11-15 00:40:23 +00:00
|
|
|
#include "X86TargetMachine.h"
|
|
|
|
#include "llvm/CallingConv.h"
|
2006-01-31 22:28:30 +00:00
|
|
|
#include "llvm/Constants.h"
|
2006-04-28 21:29:37 +00:00
|
|
|
#include "llvm/DerivedTypes.h"
|
2007-04-20 21:38:10 +00:00
|
|
|
#include "llvm/GlobalVariable.h"
|
2005-11-15 00:40:23 +00:00
|
|
|
#include "llvm/Function.h"
|
2006-04-05 23:38:46 +00:00
|
|
|
#include "llvm/Intrinsics.h"
|
2007-12-11 01:46:18 +00:00
|
|
|
#include "llvm/ADT/BitVector.h"
|
2006-03-13 23:18:16 +00:00
|
|
|
#include "llvm/ADT/VectorExtras.h"
|
2007-02-27 04:43:02 +00:00
|
|
|
#include "llvm/CodeGen/CallingConvLower.h"
|
2005-11-15 00:40:23 +00:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2006-01-11 00:33:36 +00:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2008-02-02 04:07:54 +00:00
|
|
|
#include "llvm/CodeGen/MachineModuleInfo.h"
|
2007-12-31 04:13:23 +00:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2008-02-06 22:27:42 +00:00
|
|
|
#include "llvm/CodeGen/PseudoSourceValue.h"
|
2005-11-15 00:40:23 +00:00
|
|
|
#include "llvm/CodeGen/SelectionDAG.h"
|
2006-01-31 03:14:29 +00:00
|
|
|
#include "llvm/Support/MathExtras.h"
|
2007-10-11 19:40:01 +00:00
|
|
|
#include "llvm/Support/Debug.h"
|
2005-11-15 00:40:23 +00:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2007-12-11 01:46:18 +00:00
|
|
|
#include "llvm/ADT/SmallSet.h"
|
2006-10-31 19:42:44 +00:00
|
|
|
#include "llvm/ADT/StringExtras.h"
|
2005-11-15 00:40:23 +00:00
|
|
|
using namespace llvm;
|
|
|
|
|
2008-04-25 19:11:04 +00:00
|
|
|
// Forward declarations.
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG);

X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
  : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
  bool Fast = false;

  RegInfo = TM.getRegisterInfo();
  TD = getTargetData();

  // Set up the TargetLowering object.

  // X86 is weird, it always uses i8 for shift amounts and setcc results.
  setShiftAmountType(MVT::i8);
  setSetCCResultContents(ZeroOrOneSetCCResult);
  setSchedulingPreference(SchedulingForRegPressure);
  setShiftAmountFlavor(Mask);   // shl X, 32 == shl X, 0
  setStackPointerRegisterToSaveRestore(X86StackPtr);

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetMingw()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, X86::GR8RegisterClass);
  addRegisterClass(MVT::i16, X86::GR16RegisterClass);
  addRegisterClass(MVT::i32, X86::GR32RegisterClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, X86::GR64RegisterClass);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8 , Expand);
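  // (Roughly speaking, an expanded truncating store becomes an explicit
  //  truncate of the value followed by an ordinary store of the narrower
  //  integer type, which the target does support.)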

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
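  // (ucomiss/ucomisd set ZF both for "equal" and for "unordered", so an
  //  ordered-equal test needs ZF set *and* PF clear -- hence the expansion
  //  into two simpler conditions.)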

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Expand);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf64) {
      // We have an impenetrably clever algorithm for ui64->double only.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i64  , Custom);
      // If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Expand);
    } else
      setOperationAction(ISD::UINT_TO_FP   , MVT::i32  , Promote);
  }
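  // (Promote here means, e.g., an i16 UINT_TO_FP is zero-extended to i32 and
  //  then converted with the signed i32 conversion; that is exact because the
  //  zero-extended value always lies in the non-negative range of i32.)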

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);
  // SSE has no i16 to fp conversion, only i32
  if (X86ScalarSSEf32) {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Custom);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Custom);
  }

  // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);
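  // (For example, an f64 -> i16 FP_TO_UINT is performed as a signed
  //  conversion to the wider i32 followed by a truncate; every value that
  //  fits in u16 also fits in the positive range of i32, so the result bits
  //  come out right.)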

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else {
    if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Promote);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BIT_CONVERT    , MVT::f32  , Expand);
    setOperationAction(ISD::BIT_CONVERT    , MVT::i32  , Expand);
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
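  // (For example, in "q = x / y; r = x % y" both operations become ISD::SDIVREM
  //  nodes that CSE folds into one, which maps onto a single IDIV: quotient in
  //  EAX and remainder in EDX in the 32-bit case.)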
  setOperationAction(ISD::MULHS            , MVT::i8   , Expand);
  setOperationAction(ISD::MULHU            , MVT::i8   , Expand);
  setOperationAction(ISD::SDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::UDIV             , MVT::i8   , Expand);
  setOperationAction(ISD::SREM             , MVT::i8   , Expand);
  setOperationAction(ISD::UREM             , MVT::i8   , Expand);
  setOperationAction(ISD::MULHS            , MVT::i16  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i16  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i16  , Expand);
  setOperationAction(ISD::SREM             , MVT::i16  , Expand);
  setOperationAction(ISD::UREM             , MVT::i16  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i32  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i32  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i32  , Expand);
  setOperationAction(ISD::SREM             , MVT::i32  , Expand);
  setOperationAction(ISD::UREM             , MVT::i32  , Expand);
  setOperationAction(ISD::MULHS            , MVT::i64  , Expand);
  setOperationAction(ISD::MULHU            , MVT::i64  , Expand);
  setOperationAction(ISD::SDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::UDIV             , MVT::i64  , Expand);
  setOperationAction(ISD::SREM             , MVT::i64  , Expand);
  setOperationAction(ISD::UREM             , MVT::i64  , Expand);

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);

  setOperationAction(ISD::CTPOP            , MVT::i8   , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i16  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
  setOperationAction(ISD::CTPOP            , MVT::i32  , Expand);
  setOperationAction(ISD::CTTZ             , MVT::i32  , Custom);
  setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::CTPOP          , MVT::i64  , Expand);
    setOperationAction(ISD::CTTZ           , MVT::i64  , Custom);
    setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
  }
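  // (CTTZ/CTLZ are Custom largely because BSF/BSR leave their destination
  //  undefined when the source is zero, so the lowering has to guard that
  //  case explicitly.)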

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);
  setOperationAction(ISD::BSWAP            , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT           , MVT::i1   , Promote);
  setOperationAction(ISD::SELECT           , MVT::i8   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT           , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT           , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT           , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC            , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC            , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC            , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT         , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC          , MVT::i64  , Custom);
  }
  // X86 ret instruction may pop stack.
  setOperationAction(ISD::RET              , MVT::Other, Custom);
  setOperationAction(ISD::EH_RETURN        , MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool     , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable        , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress    , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress , MVT::i32  , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol   , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool   , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable      , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress  , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol , MVT::i64  , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS        , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS        , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS      , MVT::i64  , Custom);
    setOperationAction(ISD::SRA_PARTS      , MVT::i64  , Custom);
    setOperationAction(ISD::SRL_PARTS      , MVT::i64  , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH       , MVT::Other, Legal);

  if (!Subtarget->hasSSE2())
    setOperationAction(ISD::MEMBARRIER     , MVT::Other, Expand);

  // Expand certain atomics
  setOperationAction(ISD::ATOMIC_CMP_SWAP_8 , MVT::i8,  Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_16, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_32, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_64, MVT::i64, Custom);

  setOperationAction(ISD::ATOMIC_LOAD_SUB_8 , MVT::i8,  Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB_16, MVT::i16, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB_32, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Custom);

  if (!Subtarget->is64Bit()) {
    setOperationAction(ISD::ATOMIC_LOAD_ADD_64, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB_64, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND_64, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR_64,  MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR_64, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_NAND_64, MVT::i64, Custom);
    setOperationAction(ISD::ATOMIC_SWAP_64, MVT::i64, Custom);
  }

  // Use the default ISD::DBG_STOPPOINT, ISD::DECLARE expansion.
  setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() &&
      !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing()) {
    setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i64, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION,   MVT::i32, Expand);
  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART          , MVT::Other, Custom);
  setOperationAction(ISD::VAEND            , MVT::Other, Expand);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::VAARG          , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY         , MVT::Other, Custom);
  } else {
    setOperationAction(ISD::VAARG          , MVT::Other, Expand);
    setOperationAction(ISD::VACOPY         , MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE,         MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,      MVT::Other, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  if (Subtarget->isTargetCygMing())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  if (X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::FR64RegisterClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
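    // (+0.0 stays legal because it can be materialized by XORing a register
    //  with itself, so no constant-pool load is needed.)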
    // Floating truncations from f80 and extensions to f80 go through memory.
    // If optimizing, we lie about this though and handle it in
    // InstructionSelectPreprocess so that dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f64, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }
  } else if (X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, X86::FR32RegisterClass);
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    // SSE <-> X87 conversions go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f32, MVT::f64, Expand);
      setConvertAction(MVT::f32, MVT::f80, Expand);
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      // And x87->x87 truncations also.
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64 , Expand);
      setOperationAction(ISD::FCOS , MVT::f64 , Expand);
    }
  } else {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
    addRegisterClass(MVT::f32, X86::RFP32RegisterClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    // Floating truncations go through memory.  If optimizing, we lie about
    // this though and handle it in InstructionSelectPreprocess so that
    // dagcombine2 can hack on these.
    if (Fast) {
      setConvertAction(MVT::f80, MVT::f32, Expand);
      setConvertAction(MVT::f64, MVT::f32, Expand);
      setConvertAction(MVT::f80, MVT::f64, Expand);
    }

    if (!UnsafeFPMath) {
      setOperationAction(ISD::FSIN           , MVT::f64  , Expand);
      setOperationAction(ISD::FCOS           , MVT::f64  , Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // Long double always uses X87.
  addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
  setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
  {
    bool ignored;
    APFloat TmpFlt(+0.0);
    TmpFlt.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                   &ignored);
    addLegalFPImmediate(TmpFlt);  // FLD0
    TmpFlt.changeSign();
    addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
    APFloat TmpFlt2(+1.0);
    TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                    &ignored);
    addLegalFPImmediate(TmpFlt2);  // FLD1
    TmpFlt2.changeSign();
    addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
  }
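  // (The double constants are converted to x87 extended precision above so
  //  that the legal f80 immediates +/-0.0 and +/-1.0 match exactly what
  //  FLD0 / FLD1, optionally followed by FCHS, produce.)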

  if (!UnsafeFPMath) {
    setOperationAction(ISD::FSIN           , MVT::f80  , Expand);
    setOperationAction(ISD::FCOS           , MVT::f80  , Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
       VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
    setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT,(MVT::SimpleValueType)VT,Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,(MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG2, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FLOG10, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP, (MVT::SimpleValueType)VT, Expand);
    setOperationAction(ISD::FEXP2, (MVT::SimpleValueType)VT, Expand);
  }

  if (Subtarget->hasMMX()) {
    addRegisterClass(MVT::v8i8,  X86::VR64RegisterClass);
    addRegisterClass(MVT::v4i16, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2i32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v2f32, X86::VR64RegisterClass);
    addRegisterClass(MVT::v1i64, X86::VR64RegisterClass);

    // FIXME: add MMX packed arithmetics

    setOperationAction(ISD::ADD, MVT::v8i8,  Legal);
    setOperationAction(ISD::ADD, MVT::v4i16, Legal);
    setOperationAction(ISD::ADD, MVT::v2i32, Legal);
    setOperationAction(ISD::ADD, MVT::v1i64, Legal);

    setOperationAction(ISD::SUB, MVT::v8i8,  Legal);
    setOperationAction(ISD::SUB, MVT::v4i16, Legal);
    setOperationAction(ISD::SUB, MVT::v2i32, Legal);
    setOperationAction(ISD::SUB, MVT::v1i64, Legal);

    setOperationAction(ISD::MULHS, MVT::v4i16, Legal);
    setOperationAction(ISD::MUL, MVT::v4i16, Legal);

    setOperationAction(ISD::AND, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::AND, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v4i16, Promote);
    AddPromotedToType (ISD::AND, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v2i32, Promote);
    AddPromotedToType (ISD::AND, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::AND, MVT::v1i64, Legal);

    setOperationAction(ISD::OR,  MVT::v8i8,  Promote);
    AddPromotedToType (ISD::OR,  MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v4i16, Promote);
    AddPromotedToType (ISD::OR,  MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v2i32, Promote);
    AddPromotedToType (ISD::OR,  MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::OR,  MVT::v1i64, Legal);

    setOperationAction(ISD::XOR, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::XOR, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v4i16, Promote);
    AddPromotedToType (ISD::XOR, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v2i32, Promote);
    AddPromotedToType (ISD::XOR, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::XOR, MVT::v1i64, Legal);
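    // (Bitwise ops on the narrower MMX types are promoted to v1i64: PAND,
    //  POR, and PXOR operate on the whole 64-bit register regardless of the
    //  element type, so one pattern per opcode covers every type.)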

    setOperationAction(ISD::LOAD, MVT::v8i8,  Promote);
    AddPromotedToType (ISD::LOAD, MVT::v8i8,  MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v4i16, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v4i16, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2i32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2i32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
    AddPromotedToType (ISD::LOAD, MVT::v2f32, MVT::v1i64);
    setOperationAction(ISD::LOAD, MVT::v1i64, Legal);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8,  Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8,  Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Custom);

    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i16, Custom);
  }

  if (Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4f32, Custom);
  }

  if (Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, X86::VR128RegisterClass);
    addRegisterClass(MVT::v16i8, X86::VR128RegisterClass);
    addRegisterClass(MVT::v8i16, X86::VR128RegisterClass);
    addRegisterClass(MVT::v4i32, X86::VR128RegisterClass);
    addRegisterClass(MVT::v2i64, X86::VR128RegisterClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);

    setOperationAction(ISD::VSETCC, MVT::v2f64, Custom);
    setOperationAction(ISD::VSETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::VSETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::VSETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
      setOperationAction(ISD::AND,    (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::AND,    (MVT::SimpleValueType)VT, MVT::v2i64);
      setOperationAction(ISD::OR,     (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::OR,     (MVT::SimpleValueType)VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::XOR,    (MVT::SimpleValueType)VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::LOAD,   (MVT::SimpleValueType)VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, (MVT::SimpleValueType)VT, Promote);
      AddPromotedToType (ISD::SELECT, (MVT::SimpleValueType)VT, MVT::v2i64);
    }
|
2006-04-12 21:21:57 +00:00
|
|
|
|
2008-01-17 19:59:44 +00:00
|
|
|
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
|
Significantly simplify and improve handling of FP function results on x86-32.
This case returns the value in ST(0) and then has to convert it to an SSE
register. This causes significant codegen ugliness in some cases. For
example in the trivial fp-stack-direct-ret.ll testcase we used to generate:
_bar:
subl $28, %esp
call L_foo$stub
fstpl 16(%esp)
movsd 16(%esp), %xmm0
movsd %xmm0, 8(%esp)
fldl 8(%esp)
addl $28, %esp
ret
because we move the result of foo() into an XMM register, then have to
move it back for the return of bar.
Instead of hacking ever-more special cases into the call result lowering code
we take a much simpler approach: on x86-32, fp return is modeled as always
returning into an f80 register which is then truncated to f32 or f64 as needed.
Similarly for a result, we model it as an extension to f80 + return.
This exposes the truncate and extensions to the dag combiner, allowing target
independent code to hack on them, eliminating them in this case. This gives
us this code for the example above:
_bar:
subl $12, %esp
call L_foo$stub
addl $12, %esp
ret
The nasty aspect of this is that these conversions are not legal, but we want
the second pass of dag combiner (post-legalize) to be able to hack on them.
To handle this, we lie to legalize and say they are legal, then custom expand
them on entry to the isel pass (PreprocessForFPConvert). This is gross, but
less gross than the code it is replacing :)
This also allows us to generate better code in several other cases. For
example on fp-stack-ret-conv.ll, we now generate:
_test:
subl $12, %esp
call L_foo$stub
fstps 8(%esp)
movl 16(%esp), %eax
cvtss2sd 8(%esp), %xmm0
movsd %xmm0, (%eax)
addl $12, %esp
ret
where before we produced (incidentally, the old bad code is identical to what
gcc produces):
_test:
subl $12, %esp
call L_foo$stub
fstpl (%esp)
cvtsd2ss (%esp), %xmm0
cvtss2sd %xmm0, %xmm0
movl 16(%esp), %eax
movsd %xmm0, (%eax)
addl $12, %esp
ret
Note that we generate slightly worse code on pr1505b.ll due to a scheduling
deficiency that is unrelated to this patch.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@46307 91177308-0d34-0410-b5e6-96231b3b80d8
2008-01-24 08:07:48 +00:00
|
|
|
|
2006-04-12 21:21:57 +00:00
|
|
|
// Custom lower v2i64 and v2f64 selects.
|
|
|
|
setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
|
2006-04-12 17:12:36 +00:00
|
|
|
setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
|
2006-04-10 07:23:14 +00:00
|
|
|
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
|
2006-04-12 21:21:57 +00:00
|
|
|
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
|
2008-05-12 20:34:32 +00:00
|
|
|
|
2006-02-22 02:26:30 +00:00
|
|
|
}
|
2008-02-11 04:19:36 +00:00
|
|
|
|
|
|
|
if (Subtarget->hasSSE41()) {
|
|
|
|
// FIXME: Do we need to handle scalar-to-vector here?
|
|
|
|
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
|
2008-05-23 17:49:40 +00:00
|
|
|
setOperationAction(ISD::MUL, MVT::v2i64, Legal);
|
2008-02-11 04:19:36 +00:00
|
|
|
|
|
|
|
// i8 and i16 vectors are custom because the source register and
|
|
|
|
// source memory operand types are not the same width. f32 vectors are
|
|
|
|
// custom since the immediate controlling the insert encodes additional
|
|
|
|
// information.
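// Illustrative detail (an assumption added for clarity, not part of the
// original comment): PINSRB/PINSRW read a full 32-bit register source but
// only an 8/16-bit memory operand, and the INSERTPS immediate also selects
// the source lane and a zero mask, which is what the Custom markings above
// account for.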
|
|
|
|
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
|
|
|
|
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
|
|
|
|
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Legal);
|
|
|
|
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
|
|
|
|
|
|
|
|
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
|
|
|
|
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
|
|
|
|
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
|
2008-03-24 21:52:23 +00:00
|
|
|
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
|
2008-02-11 04:19:36 +00:00
|
|
|
|
|
|
|
if (Subtarget->is64Bit()) {
|
2008-02-12 22:51:28 +00:00
|
|
|
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Legal);
|
|
|
|
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
|
2008-02-11 04:19:36 +00:00
|
|
|
}
|
|
|
|
}
|
2006-02-22 02:26:30 +00:00
|
|
|
|
2008-07-17 16:51:19 +00:00
|
|
|
if (Subtarget->hasSSE42()) {
|
|
|
|
setOperationAction(ISD::VSETCC, MVT::v2i64, Custom);
|
|
|
|
}
|
|
|
|
|
2006-04-05 23:38:46 +00:00
|
|
|
// We want to custom lower some of our intrinsics.
|
|
|
|
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
|
|
|
|
|
X86 target specific DAG combine: turn build_vector (load x), (load x+4),
(load x+8), (load x+12), <0, 1, 2, 3> to a single 128-bit load (aligned and
unaligned).
e.g.
__m128 test(float a, float b, float c, float d) {
return _mm_set_ps(d, c, b, a);
}
_test:
movups 4(%esp), %xmm0
ret
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@29042 91177308-0d34-0410-b5e6-96231b3b80d8
2006-07-07 08:33:52 +00:00
|
|
|
// We have target-specific dag combine patterns for the following nodes:
|
|
|
|
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
|
2008-05-09 21:53:03 +00:00
|
|
|
setTargetDAGCombine(ISD::BUILD_VECTOR);
|
2006-10-04 06:57:07 +00:00
|
|
|
setTargetDAGCombine(ISD::SELECT);
|
2008-02-22 02:09:43 +00:00
|
|
|
setTargetDAGCombine(ISD::STORE);
|
2006-07-07 08:33:52 +00:00
|
|
|
|
2005-11-15 00:40:23 +00:00
|
|
|
computeRegisterProperties();
|
|
|
|
|
2006-02-14 08:25:08 +00:00
|
|
|
// FIXME: These should be based on subtarget info. Plus, the values should
|
|
|
|
// be smaller when we are optimizing for size.
|
2008-06-30 21:00:56 +00:00
|
|
|
maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
|
|
|
|
maxStoresPerMemcpy = 16; // For @llvm.memcpy -> sequence of stores
|
|
|
|
maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
|
2005-11-15 00:40:23 +00:00
|
|
|
allowUnalignedMemoryAccesses = true; // x86 supports it!
|
2008-02-28 00:43:03 +00:00
|
|
|
setPrefLoopAlignment(16);
|
2005-11-15 00:40:23 +00:00
|
|
|
}
|
|
|
|
|
2008-03-10 15:42:14 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
MVT X86TargetLowering::getSetCCResultType(const SDValue &) const {
|
2008-03-10 15:42:14 +00:00
|
|
|
return MVT::i8;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-01-23 23:17:41 +00:00
|
|
|
/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
|
|
|
|
/// the desired ByVal argument alignment.
|
|
|
|
static void getMaxByValAlign(const Type *Ty, unsigned &MaxAlign) {
|
|
|
|
if (MaxAlign == 16)
|
|
|
|
return;
|
|
|
|
if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
|
|
|
|
if (VTy->getBitWidth() == 128)
|
|
|
|
MaxAlign = 16;
|
|
|
|
} else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
|
|
|
|
unsigned EltAlign = 0;
|
|
|
|
getMaxByValAlign(ATy->getElementType(), EltAlign);
|
|
|
|
if (EltAlign > MaxAlign)
|
|
|
|
MaxAlign = EltAlign;
|
|
|
|
} else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
|
|
|
|
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
|
|
|
|
unsigned EltAlign = 0;
|
|
|
|
getMaxByValAlign(STy->getElementType(i), EltAlign);
|
|
|
|
if (EltAlign > MaxAlign)
|
|
|
|
MaxAlign = EltAlign;
|
|
|
|
if (MaxAlign == 16)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
|
|
|
|
/// function arguments in the caller parameter area. For X86, aggregates
|
2008-02-08 19:48:20 +00:00
|
|
|
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
|
|
|
|
/// are at 4-byte boundaries.
|
2008-01-23 23:17:41 +00:00
|
|
|
unsigned X86TargetLowering::getByValTypeAlignment(const Type *Ty) const {
|
2008-08-21 21:00:15 +00:00
|
|
|
if (Subtarget->is64Bit()) {
|
|
|
|
// Max of 8 and alignment of type.
|
2008-09-09 18:22:57 +00:00
|
|
|
unsigned TyAlign = TD->getABITypeAlignment(Ty);
|
2008-08-21 21:00:15 +00:00
|
|
|
if (TyAlign > 8)
|
|
|
|
return TyAlign;
|
|
|
|
return 8;
|
|
|
|
}
|
|
|
|
|
2008-01-23 23:17:41 +00:00
|
|
|
unsigned Align = 4;
|
2008-02-08 19:48:20 +00:00
|
|
|
if (Subtarget->hasSSE1())
|
|
|
|
getMaxByValAlign(Ty, Align);
|
2008-01-23 23:17:41 +00:00
|
|
|
return Align;
|
|
|
|
}
|
2007-02-25 08:29:00 +00:00
|
|
|
|
2008-05-15 08:39:06 +00:00
|
|
|
/// getOptimalMemOpType - Returns the target specific optimal type for load
|
2008-05-15 22:13:02 +00:00
|
|
|
/// and store operations as a result of memset, memcpy, and memmove
|
|
|
|
/// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
|
2008-05-15 08:39:06 +00:00
|
|
|
/// determining it.
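/// Illustrative use (a sketch of one query, assuming the conditions checked
/// in the body below): a 64-byte memcpy from a constant string on an SSE2
/// target whose stack is kept 16-byte aligned returns MVT::v4i32, so the copy
/// is expanded as a series of 16-byte vector loads and stores.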
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT
|
2008-05-15 08:39:06 +00:00
|
|
|
X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
|
|
|
|
bool isSrcConst, bool isSrcStr) const {
|
2008-10-28 05:49:35 +00:00
|
|
|
// FIXME: This turns off use of xmm stores for memset/memcpy on targets like
|
|
|
|
// Linux. This is because the stack realignment code can't handle certain
|
|
|
|
// cases like PR2962. This should be removed when PR2962 is fixed.
|
|
|
|
if (Subtarget->getStackAlignment() >= 16) {
|
|
|
|
if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16)
|
|
|
|
return MVT::v4i32;
|
|
|
|
if ((isSrcConst || isSrcStr) && Subtarget->hasSSE1() && Size >= 16)
|
|
|
|
return MVT::v4f32;
|
|
|
|
}
|
2008-05-15 08:39:06 +00:00
|
|
|
if (Subtarget->is64Bit() && Size >= 8)
|
|
|
|
return MVT::i64;
|
|
|
|
return MVT::i32;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
Much improved pic jumptable codegen:
Then:
call "L1$pb"
"L1$pb":
popl %eax
...
LBB1_1: # entry
imull $4, %ecx, %ecx
leal LJTI1_0-"L1$pb"(%eax), %edx
addl LJTI1_0-"L1$pb"(%ecx,%eax), %edx
jmpl *%edx
.align 2
.set L1_0_set_3,LBB1_3-LJTI1_0
.set L1_0_set_2,LBB1_2-LJTI1_0
.set L1_0_set_5,LBB1_5-LJTI1_0
.set L1_0_set_4,LBB1_4-LJTI1_0
LJTI1_0:
.long L1_0_set_3
.long L1_0_set_2
Now:
call "L1$pb"
"L1$pb":
popl %eax
...
LBB1_1: # entry
addl LJTI1_0-"L1$pb"(%eax,%ecx,4), %eax
jmpl *%eax
.align 2
.set L1_0_set_3,LBB1_3-"L1$pb"
.set L1_0_set_2,LBB1_2-"L1$pb"
.set L1_0_set_5,LBB1_5-"L1$pb"
.set L1_0_set_4,LBB1_4-"L1$pb"
LJTI1_0:
.long L1_0_set_3
.long L1_0_set_2
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@43924 91177308-0d34-0410-b5e6-96231b3b80d8
2007-11-09 01:32:10 +00:00
|
|
|
/// getPICJumpTableRelocBase - Returns the relocation base for the given PIC
|
|
|
|
/// jumptable.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
|
2007-11-09 01:32:10 +00:00
|
|
|
SelectionDAG &DAG) const {
|
|
|
|
if (usesGlobalOffsetTable())
|
|
|
|
return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
|
|
|
|
if (!Subtarget->isPICStyleRIPRel())
|
|
|
|
return DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy());
|
|
|
|
return Table;
|
|
|
|
}
|
|
|
|
|
2007-02-25 08:29:00 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Return Value Calling Convention Implementation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2007-02-28 04:55:35 +00:00
|
|
|
#include "X86GenCallingConv.inc"
|
2007-10-11 19:40:01 +00:00
|
|
|
|
2007-02-25 09:12:39 +00:00
|
|
|
/// LowerRET - Lower an ISD::RET node.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
|
2007-02-25 09:12:39 +00:00
|
|
|
assert((Op.getNumOperands() & 1) == 1 && "ISD::RET should have odd # args");
|
|
|
|
|
2007-02-27 05:28:59 +00:00
|
|
|
SmallVector<CCValAssign, 16> RVLocs;
|
|
|
|
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
|
2007-06-19 00:13:10 +00:00
|
|
|
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
|
|
|
|
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
|
2008-08-28 21:40:38 +00:00
|
|
|
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_X86);
|
2007-10-11 19:40:01 +00:00
|
|
|
|
2007-02-25 09:12:39 +00:00
|
|
|
// If this is the first return lowered for this function, add the regs to the
|
|
|
|
// liveout set for the function.
|
2007-12-31 04:13:23 +00:00
|
|
|
if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
|
2007-02-27 05:28:59 +00:00
|
|
|
for (unsigned i = 0; i != RVLocs.size(); ++i)
|
|
|
|
if (RVLocs[i].isRegLoc())
|
2007-12-31 04:13:23 +00:00
|
|
|
DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
|
2007-02-25 09:12:39 +00:00
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Chain = Op.getOperand(0);
|
2007-02-25 09:12:39 +00:00
|
|
|
|
2007-10-11 19:40:01 +00:00
|
|
|
// Handle tail call return.
|
2008-04-30 09:16:33 +00:00
|
|
|
Chain = GetPossiblePreceedingTailCall(Chain, X86ISD::TAILCALL);
|
2007-10-11 19:40:01 +00:00
|
|
|
if (Chain.getOpcode() == X86ISD::TAILCALL) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue TailCall = Chain;
|
|
|
|
SDValue TargetAddress = TailCall.getOperand(1);
|
|
|
|
SDValue StackAdjustment = TailCall.getOperand(2);
|
2008-01-16 05:52:18 +00:00
|
|
|
assert(((TargetAddress.getOpcode() == ISD::Register &&
|
2008-09-22 14:50:07 +00:00
|
|
|
(cast<RegisterSDNode>(TargetAddress)->getReg() == X86::EAX ||
|
2007-10-11 19:40:01 +00:00
|
|
|
cast<RegisterSDNode>(TargetAddress)->getReg() == X86::R9)) ||
|
2008-09-16 21:48:12 +00:00
|
|
|
TargetAddress.getOpcode() == ISD::TargetExternalSymbol ||
|
2007-10-11 19:40:01 +00:00
|
|
|
TargetAddress.getOpcode() == ISD::TargetGlobalAddress) &&
|
|
|
|
"Expecting an global address, external symbol, or register");
|
2008-01-16 05:52:18 +00:00
|
|
|
assert(StackAdjustment.getOpcode() == ISD::Constant &&
|
|
|
|
"Expecting a const value");
|
2007-10-11 19:40:01 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue,8> Operands;
|
2007-10-11 19:40:01 +00:00
|
|
|
Operands.push_back(Chain.getOperand(0));
|
|
|
|
Operands.push_back(TargetAddress);
|
|
|
|
Operands.push_back(StackAdjustment);
|
|
|
|
// Copy registers used by the call. Last operand is a flag so it is not
|
|
|
|
// copied.
|
2007-10-16 09:05:00 +00:00
|
|
|
for (unsigned i=3; i < TailCall.getNumOperands()-1; i++) {
|
2007-10-11 19:40:01 +00:00
|
|
|
Operands.push_back(Chain.getOperand(i));
|
|
|
|
}
|
2007-10-16 09:05:00 +00:00
|
|
|
return DAG.getNode(X86ISD::TC_RETURN, MVT::Other, &Operands[0],
|
|
|
|
Operands.size());
|
2007-10-11 19:40:01 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Regular return.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Flag;
|
2007-10-11 19:40:01 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 6> RetOps;
|
2008-03-11 03:23:40 +00:00
|
|
|
RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
|
|
|
|
// Operand #1 = Bytes To Pop
|
|
|
|
RetOps.push_back(DAG.getConstant(getBytesToPopOnReturn(), MVT::i16));
|
|
|
|
|
2007-02-25 09:12:39 +00:00
|
|
|
// Copy the result values into the output registers.
|
2008-03-10 21:08:41 +00:00
|
|
|
for (unsigned i = 0; i != RVLocs.size(); ++i) {
|
|
|
|
CCValAssign &VA = RVLocs[i];
|
|
|
|
assert(VA.isRegLoc() && "Can only return in registers!");
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ValToCopy = Op.getOperand(i*2+1);
|
2007-02-25 09:12:39 +00:00
|
|
|
|
2008-03-11 03:23:40 +00:00
|
|
|
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
|
|
|
|
// the RET instruction and handled by the FP Stackifier.
|
|
|
|
if (RVLocs[i].getLocReg() == X86::ST0 ||
|
|
|
|
RVLocs[i].getLocReg() == X86::ST1) {
|
|
|
|
// If this is a copy from an xmm register to ST(0), use an FPExtend to
|
|
|
|
// change the value to the FP stack register class.
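// Illustrative sketch (assumption): an f32 return value living in an SSE
// register under an x86-32 convention that returns in ST(0) becomes
//   (FP_EXTEND:f80 <xmm value>)
// pushed as an extra RET operand; the FP stackifier later emits the code
// that actually loads it onto the x87 stack.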
|
|
|
|
if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT()))
|
|
|
|
ValToCopy = DAG.getNode(ISD::FP_EXTEND, MVT::f80, ValToCopy);
|
|
|
|
RetOps.push_back(ValToCopy);
|
|
|
|
// Don't emit a copytoreg.
|
|
|
|
continue;
|
|
|
|
}
|
2008-06-24 22:01:44 +00:00
|
|
|
|
2008-03-10 21:08:41 +00:00
|
|
|
Chain = DAG.getCopyToReg(Chain, VA.getLocReg(), ValToCopy, Flag);
|
2007-02-25 09:12:39 +00:00
|
|
|
Flag = Chain.getValue(1);
|
|
|
|
}
|
2008-04-21 23:59:07 +00:00
|
|
|
|
|
|
|
// The x86-64 ABI for returning structs by value requires that we copy
|
|
|
|
// the sret argument into %rax for the return. We saved the argument into
|
|
|
|
// a virtual register in the entry block, so now we copy the value out
|
|
|
|
// and into %rax.
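// Illustrative example (assumption): for a function such as
//   define void @f(%struct.S* sret %agg.result)
// on x86-64, the incoming sret pointer was saved into a virtual register in
// LowerFORMAL_ARGUMENTS, and the code below copies it into %rax because the
// ABI requires the address of the returned aggregate to be in %rax.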
|
|
|
|
if (Subtarget->is64Bit() &&
|
|
|
|
DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
|
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
|
|
|
|
unsigned Reg = FuncInfo->getSRetReturnReg();
|
|
|
|
if (!Reg) {
|
|
|
|
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
|
|
|
|
FuncInfo->setSRetReturnReg(Reg);
|
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Val = DAG.getCopyFromReg(Chain, Reg, getPointerTy());
|
2008-04-21 23:59:07 +00:00
|
|
|
|
|
|
|
Chain = DAG.getCopyToReg(Chain, X86::RAX, Val, Flag);
|
|
|
|
Flag = Chain.getValue(1);
|
|
|
|
}
|
2007-02-25 09:12:39 +00:00
|
|
|
|
2008-03-11 03:23:40 +00:00
|
|
|
RetOps[0] = Chain; // Update chain.
|
|
|
|
|
|
|
|
// Add the flag if we have it.
|
2008-08-28 21:40:38 +00:00
|
|
|
if (Flag.getNode())
|
2008-03-11 03:23:40 +00:00
|
|
|
RetOps.push_back(Flag);
|
|
|
|
|
|
|
|
return DAG.getNode(X86ISD::RET_FLAG, MVT::Other, &RetOps[0], RetOps.size());
|
2007-02-25 09:12:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-25 08:59:22 +00:00
|
|
|
/// LowerCallResult - Lower the result values of an ISD::CALL into the
|
|
|
|
/// appropriate copies out of appropriate physical registers. This assumes that
|
|
|
|
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
|
|
|
|
/// being lowered. This returns an SDNode with the same number of values as the
|
|
|
|
/// ISD::CALL.
|
|
|
|
SDNode *X86TargetLowering::
|
2008-09-13 01:54:27 +00:00
|
|
|
LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
|
2007-02-25 08:59:22 +00:00
|
|
|
unsigned CallingConv, SelectionDAG &DAG) {
|
2007-02-28 07:09:55 +00:00
|
|
|
|
|
|
|
// Assign locations to each value returned by this call.
|
2007-02-27 05:28:59 +00:00
|
|
|
SmallVector<CCValAssign, 16> RVLocs;
|
2008-09-13 01:54:27 +00:00
|
|
|
bool isVarArg = TheCall->isVarArg();
|
2007-06-19 00:13:10 +00:00
|
|
|
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
|
2007-02-28 07:09:55 +00:00
|
|
|
CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> ResultVals;
|
2007-02-25 08:59:22 +00:00
|
|
|
|
|
|
|
// Copy all of the result registers out of their specified physreg.
|
2008-03-10 21:08:41 +00:00
|
|
|
for (unsigned i = 0; i != RVLocs.size(); ++i) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT CopyVT = RVLocs[i].getValVT();
|
2008-03-10 21:08:41 +00:00
|
|
|
|
|
|
|
// If this is a call to a function that returns an fp value on the floating
|
|
|
|
// point stack, but where we prefer to use the value in xmm registers, copy
|
|
|
|
// it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
|
2008-08-21 19:54:16 +00:00
|
|
|
if ((RVLocs[i].getLocReg() == X86::ST0 ||
|
|
|
|
RVLocs[i].getLocReg() == X86::ST1) &&
|
2008-03-10 21:08:41 +00:00
|
|
|
isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
|
|
|
|
CopyVT = MVT::f80;
|
2007-02-25 08:59:22 +00:00
|
|
|
}
|
|
|
|
|
2008-03-10 21:08:41 +00:00
|
|
|
Chain = DAG.getCopyFromReg(Chain, RVLocs[i].getLocReg(),
|
|
|
|
CopyVT, InFlag).getValue(1);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Val = Chain.getValue(0);
|
2008-03-10 21:08:41 +00:00
|
|
|
InFlag = Chain.getValue(2);
|
|
|
|
|
|
|
|
if (CopyVT != RVLocs[i].getValVT()) {
|
|
|
|
// Round the F80 to the right size, which also moves it to the appropriate xmm
|
|
|
|
// register.
|
|
|
|
Val = DAG.getNode(ISD::FP_ROUND, RVLocs[i].getValVT(), Val,
|
|
|
|
// This truncation won't change the value.
|
|
|
|
DAG.getIntPtrConstant(1));
|
|
|
|
}
|
2007-02-25 08:29:00 +00:00
|
|
|
|
2008-03-10 21:08:41 +00:00
|
|
|
ResultVals.push_back(Val);
|
2007-02-25 08:29:00 +00:00
|
|
|
}
|
2008-07-02 17:40:58 +00:00
|
|
|
|
2007-02-25 08:59:22 +00:00
|
|
|
// Merge everything together with a MERGE_VALUES node.
|
|
|
|
ResultVals.push_back(Chain);
|
2008-06-30 10:19:09 +00:00
|
|
|
return DAG.getMergeValues(TheCall->getVTList(), &ResultVals[0],
|
2008-08-28 21:40:38 +00:00
|
|
|
ResultVals.size()).getNode();
|
2007-02-25 08:29:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-11-15 00:40:23 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2007-10-11 19:40:01 +00:00
|
|
|
// C & StdCall & Fast Calling Convention implementation
|
2005-11-15 00:40:23 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
2007-01-28 13:31:35 +00:00
|
|
|
// The StdCall calling convention is the standard for many Windows API
|
|
|
|
// routines and the like. It differs from the C calling convention only
|
|
|
|
// slightly: the callee cleans up the stack instead of the caller, and symbols
|
|
|
|
// are decorated in some fancy way :) It doesn't support any vector arguments.
|
2007-10-11 19:40:01 +00:00
|
|
|
// For info on fast calling convention see Fast Calling Convention (tail call)
|
|
|
|
// implementation LowerX86_32FastCCCallTo.
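// Illustrative example (assumption, not taken from this file): a declaration
// such as
//   int __stdcall f(int a, int b);
// is emitted with the decorated symbol "_f@8" on x86-32, and the callee pops
// its 8 bytes of arguments with "ret 8".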
|
2005-11-15 00:40:23 +00:00
|
|
|
|
2006-04-27 05:35:28 +00:00
|
|
|
/// AddLiveIn - This helper function adds the specified physical register to the
|
|
|
|
/// MachineFunction as a live in value. It also creates a corresponding virtual
|
|
|
|
/// register for it.
|
|
|
|
static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
|
2007-01-28 13:31:35 +00:00
|
|
|
const TargetRegisterClass *RC) {
|
2006-04-27 05:35:28 +00:00
|
|
|
assert(RC->contains(PReg) && "Not the correct regclass!");
|
2007-12-31 04:13:23 +00:00
|
|
|
unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
|
|
|
|
MF.getRegInfo().addLiveIn(PReg, VReg);
|
2006-04-27 05:35:28 +00:00
|
|
|
return VReg;
|
|
|
|
}
|
|
|
|
|
2008-02-26 17:50:59 +00:00
|
|
|
/// CallIsStructReturn - Determines whether a CALL node uses struct return
|
|
|
|
/// semantics.
|
2008-09-13 01:54:27 +00:00
|
|
|
static bool CallIsStructReturn(CallSDNode *TheCall) {
|
|
|
|
unsigned NumOps = TheCall->getNumArgs();
|
2008-01-05 16:56:59 +00:00
|
|
|
if (!NumOps)
|
|
|
|
return false;
|
2008-03-21 09:14:45 +00:00
|
|
|
|
2008-09-13 01:54:27 +00:00
|
|
|
return TheCall->getArgFlags(0).isSRet();
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
|
|
|
|
2008-02-26 17:50:59 +00:00
|
|
|
/// ArgsAreStructReturn - Determines whether a FORMAL_ARGUMENTS node uses struct
|
|
|
|
/// return semantics.
|
2008-07-27 21:46:04 +00:00
|
|
|
static bool ArgsAreStructReturn(SDValue Op) {
|
2008-08-28 21:40:38 +00:00
|
|
|
unsigned NumArgs = Op.getNode()->getNumValues() - 1;
|
2008-01-05 16:56:59 +00:00
|
|
|
if (!NumArgs)
|
|
|
|
return false;
|
2008-03-21 09:14:45 +00:00
|
|
|
|
|
|
|
return cast<ARG_FLAGSSDNode>(Op.getOperand(3))->getArgFlags().isSRet();
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
|
|
|
|
2008-04-12 18:11:06 +00:00
|
|
|
/// IsCalleePop - Determines whether a CALL or FORMAL_ARGUMENTS node requires
|
|
|
|
/// the callee to pop its own arguments. Callee pop is necessary to support tail
|
2008-02-26 17:50:59 +00:00
|
|
|
/// calls.
|
2008-09-13 01:54:27 +00:00
|
|
|
bool X86TargetLowering::IsCalleePop(bool IsVarArg, unsigned CallingConv) {
|
2008-01-05 16:56:59 +00:00
|
|
|
if (IsVarArg)
|
|
|
|
return false;
|
|
|
|
|
2008-09-13 01:54:27 +00:00
|
|
|
switch (CallingConv) {
|
2008-01-05 16:56:59 +00:00
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case CallingConv::X86_StdCall:
|
|
|
|
return !Subtarget->is64Bit();
|
|
|
|
case CallingConv::X86_FastCall:
|
|
|
|
return !Subtarget->is64Bit();
|
|
|
|
case CallingConv::Fast:
|
|
|
|
return PerformTailCallOpt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-09-13 01:54:27 +00:00
|
|
|
/// CCAssignFnForNode - Selects the correct CCAssignFn for the
|
|
|
|
/// given CallingConvention value.
|
|
|
|
CCAssignFn *X86TargetLowering::CCAssignFnForNode(unsigned CC) const {
|
2008-02-20 11:22:39 +00:00
|
|
|
if (Subtarget->is64Bit()) {
|
2008-03-22 20:57:27 +00:00
|
|
|
if (Subtarget->isTargetWin64())
|
2008-03-22 20:37:30 +00:00
|
|
|
return CC_X86_Win64_C;
|
2008-09-07 09:07:23 +00:00
|
|
|
else if (CC == CallingConv::Fast && PerformTailCallOpt)
|
|
|
|
return CC_X86_64_TailCall;
|
|
|
|
else
|
|
|
|
return CC_X86_64_C;
|
2008-02-20 11:22:39 +00:00
|
|
|
}
|
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
if (CC == CallingConv::X86_FastCall)
|
|
|
|
return CC_X86_32_FastCall;
|
2008-09-10 18:25:29 +00:00
|
|
|
else if (CC == CallingConv::Fast)
|
|
|
|
return CC_X86_32_FastCC;
|
2008-01-05 16:56:59 +00:00
|
|
|
else
|
|
|
|
return CC_X86_32_C;
|
|
|
|
}
|
|
|
|
|
2008-02-26 17:50:59 +00:00
|
|
|
/// NameDecorationForFORMAL_ARGUMENTS - Selects the appropriate decoration to
|
|
|
|
/// apply to a MachineFunction containing a given FORMAL_ARGUMENTS node.
|
2008-01-05 16:56:59 +00:00
|
|
|
NameDecorationStyle
|
2008-07-27 21:46:04 +00:00
|
|
|
X86TargetLowering::NameDecorationForFORMAL_ARGUMENTS(SDValue Op) {
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
|
2008-01-05 16:56:59 +00:00
|
|
|
if (CC == CallingConv::X86_FastCall)
|
|
|
|
return FastCall;
|
|
|
|
else if (CC == CallingConv::X86_StdCall)
|
|
|
|
return StdCall;
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
2008-01-11 16:49:42 +00:00
|
|
|
|
2008-02-26 22:21:54 +00:00
|
|
|
/// CallRequiresGOTPtrInReg - Check whether the call requires the GOT pointer
|
|
|
|
/// in a register before calling.
|
|
|
|
bool X86TargetLowering::CallRequiresGOTPtrInReg(bool Is64Bit, bool IsTailCall) {
|
|
|
|
return !IsTailCall && !Is64Bit &&
|
|
|
|
getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
|
|
|
|
Subtarget->isPICStyleGOT();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// CallRequiresFnAddressInReg - Check whether the call requires the function
|
|
|
|
/// address to be loaded in a register.
|
|
|
|
bool
|
|
|
|
X86TargetLowering::CallRequiresFnAddressInReg(bool Is64Bit, bool IsTailCall) {
|
|
|
|
return !Is64Bit && IsTailCall &&
|
|
|
|
getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
|
|
|
|
Subtarget->isPICStyleGOT();
|
|
|
|
}
|
|
|
|
|
2008-02-26 17:50:59 +00:00
|
|
|
/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address specified
|
|
|
|
/// by "Src" to address "Dst" with size and alignment information specified by
|
2008-04-12 18:11:06 +00:00
|
|
|
/// the specific parameter attribute. The copy will be passed as a byval
|
|
|
|
/// function parameter.
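/// Illustrative use (assumption): for a call operand such as
///   call void @g(%struct.S* byval align 4 %s)
/// this emits a single always-inlined memcpy of Flags.getByValSize() bytes
/// from the caller's copy of the object into the outgoing argument area.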
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue
|
|
|
|
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
|
2008-03-21 09:14:45 +00:00
|
|
|
ISD::ArgFlagsTy Flags, SelectionDAG &DAG) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
|
Drop ISD::MEMSET, ISD::MEMMOVE, and ISD::MEMCPY, which are not Legal
on any current target and aren't optimized in DAGCombiner. Instead
of using intermediate nodes, expand the operations, choosing between
simple loads/stores, target-specific code, and library calls,
immediately.
Previously, the code to emit optimized code for these operations
was only used at initial SelectionDAG construction time; now it is
used at all times. This fixes some cases where rep;movs was being
used for small copies where simple loads/stores would be better.
This also cleans up code that checks for alignments less than 4;
let the targets make that decision instead of doing it in
target-independent code. This allows x86 to use rep;movs in
low-alignment cases.
Also, this fixes a bug that resulted in the use of rep;stos for
memsets of 0 with non-constant memory size when the alignment was
at least 4. It's better to use the library in this case, which
can be significantly faster when the size is large.
This also preserves more SourceValue information when memory
intrinsics are lowered into simple loads/stores.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49572 91177308-0d34-0410-b5e6-96231b3b80d8
2008-04-12 04:36:06 +00:00
|
|
|
return DAG.getMemcpy(Chain, Dst, Src, SizeNode, Flags.getByValAlign(),
|
2008-04-30 09:16:33 +00:00
|
|
|
/*AlwaysInline=*/true, NULL, 0, NULL, 0);
|
2008-01-11 16:49:42 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerMemArgument(SDValue Op, SelectionDAG &DAG,
|
2007-09-14 15:48:13 +00:00
|
|
|
const CCValAssign &VA,
|
|
|
|
MachineFrameInfo *MFI,
|
2008-02-26 09:19:59 +00:00
|
|
|
unsigned CC,
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Root, unsigned i) {
|
2007-09-14 15:48:13 +00:00
|
|
|
// Create the nodes corresponding to a load from this parameter slot.
|
2008-03-21 09:14:45 +00:00
|
|
|
ISD::ArgFlagsTy Flags =
|
|
|
|
cast<ARG_FLAGSSDNode>(Op.getOperand(3 + i))->getArgFlags();
|
2008-02-26 09:19:59 +00:00
|
|
|
bool AlwaysUseMutable = (CC==CallingConv::Fast) && PerformTailCallOpt;
|
2008-03-21 09:14:45 +00:00
|
|
|
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
|
2008-01-10 02:24:25 +00:00
|
|
|
|
2008-02-26 09:19:59 +00:00
|
|
|
// FIXME: For now, all byval parameter objects are marked mutable. This can be
|
|
|
|
// changed with more analysis.
|
|
|
|
// In case of tail call optimization, mark all arguments mutable, since they
|
|
|
|
// could be overwritten by lowering of arguments in case of a tail call.
|
2008-06-06 12:08:01 +00:00
|
|
|
int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
|
2008-02-26 09:19:59 +00:00
|
|
|
VA.getLocMemOffset(), isImmutable);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
|
2008-03-21 09:14:45 +00:00
|
|
|
if (Flags.isByVal())
|
2007-09-14 15:48:13 +00:00
|
|
|
return FIN;
|
2008-02-06 22:27:42 +00:00
|
|
|
return DAG.getLoad(VA.getValVT(), Root, FIN,
|
2008-07-11 22:44:52 +00:00
|
|
|
PseudoSourceValue::getFixedStack(FI), 0);
|
2007-09-14 15:48:13 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
|
|
|
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
|
2006-04-26 01:20:17 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
2007-08-15 17:12:32 +00:00
|
|
|
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
|
2007-02-28 05:39:26 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
const Function* Fn = MF.getFunction();
|
|
|
|
if (Fn->hasExternalLinkage() &&
|
|
|
|
Subtarget->isTargetCygMing() &&
|
|
|
|
Fn->getName() == "main")
|
|
|
|
FuncInfo->setForceFramePointer(true);
|
2007-02-28 06:10:12 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
// Decorate the function name.
|
|
|
|
FuncInfo->setDecorationStyle(NameDecorationForFORMAL_ARGUMENTS(Op));
|
|
|
|
|
2006-04-26 01:20:17 +00:00
|
|
|
MachineFrameInfo *MFI = MF.getFrameInfo();
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Root = Op.getOperand(0);
|
2008-09-12 16:56:44 +00:00
|
|
|
bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
|
2008-01-03 16:47:34 +00:00
|
|
|
unsigned CC = MF.getFunction()->getCallingConv();
|
2008-01-05 16:56:59 +00:00
|
|
|
bool Is64Bit = Subtarget->is64Bit();
|
2008-04-27 23:15:03 +00:00
|
|
|
bool IsWin64 = Subtarget->isTargetWin64();
|
2008-01-03 16:47:34 +00:00
|
|
|
|
|
|
|
assert(!(isVarArg && CC == CallingConv::Fast) &&
|
|
|
|
"Var args not supported with calling convention fastcc");
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2007-02-28 07:00:42 +00:00
|
|
|
// Assign locations to all of the incoming arguments.
|
2007-02-28 06:10:12 +00:00
|
|
|
SmallVector<CCValAssign, 16> ArgLocs;
|
2008-01-03 16:47:34 +00:00
|
|
|
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
|
2008-09-13 01:54:27 +00:00
|
|
|
CCInfo.AnalyzeFormalArguments(Op.getNode(), CCAssignFnForNode(CC));
|
2007-02-28 06:10:12 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> ArgValues;
|
2007-02-28 06:10:12 +00:00
|
|
|
unsigned LastVal = ~0U;
|
|
|
|
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
|
|
|
|
CCValAssign &VA = ArgLocs[i];
|
|
|
|
// TODO: If an arg is passed in two places (e.g. reg and stack), skip later
|
|
|
|
// places.
|
|
|
|
assert(VA.getValNo() != LastVal &&
|
|
|
|
"Don't support value assigned to multiple locs yet");
|
|
|
|
LastVal = VA.getValNo();
|
2007-01-28 13:31:35 +00:00
|
|
|
|
2007-02-28 06:10:12 +00:00
|
|
|
if (VA.isRegLoc()) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT RegVT = VA.getLocVT();
|
2007-02-28 06:10:12 +00:00
|
|
|
TargetRegisterClass *RC;
|
|
|
|
if (RegVT == MVT::i32)
|
|
|
|
RC = X86::GR32RegisterClass;
|
2008-01-05 16:56:59 +00:00
|
|
|
else if (Is64Bit && RegVT == MVT::i64)
|
2007-02-28 06:10:12 +00:00
|
|
|
RC = X86::GR64RegisterClass;
|
2008-02-05 20:46:33 +00:00
|
|
|
else if (RegVT == MVT::f32)
|
2007-02-28 06:10:12 +00:00
|
|
|
RC = X86::FR32RegisterClass;
|
2008-02-05 20:46:33 +00:00
|
|
|
else if (RegVT == MVT::f64)
|
2007-02-28 06:10:12 +00:00
|
|
|
RC = X86::FR64RegisterClass;
|
2008-06-06 12:08:01 +00:00
|
|
|
else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
|
2008-04-25 07:56:45 +00:00
|
|
|
RC = X86::VR128RegisterClass;
|
2008-06-06 12:08:01 +00:00
|
|
|
else if (RegVT.isVector()) {
|
|
|
|
assert(RegVT.getSizeInBits() == 64);
|
2008-04-25 07:56:45 +00:00
|
|
|
if (!Is64Bit)
|
|
|
|
RC = X86::VR64RegisterClass; // MMX values are passed in MMXs.
|
|
|
|
else {
|
|
|
|
// Darwin calling convention passes MMX values in either GPRs or
|
|
|
|
// XMMs in x86-64. Other targets pass them in memory.
|
|
|
|
if (RegVT != MVT::v1i64 && Subtarget->hasSSE2()) {
|
|
|
|
RC = X86::VR128RegisterClass; // MMX values are passed in XMMs.
|
|
|
|
RegVT = MVT::v2i64;
|
|
|
|
} else {
|
|
|
|
RC = X86::GR64RegisterClass; // v1i64 values are passed in GPRs.
|
|
|
|
RegVT = MVT::i64;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
assert(0 && "Unknown argument type!");
|
2007-01-28 13:31:35 +00:00
|
|
|
}
|
2007-03-02 05:12:29 +00:00
|
|
|
|
|
|
|
unsigned Reg = AddLiveIn(DAG.getMachineFunction(), VA.getLocReg(), RC);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ArgValue = DAG.getCopyFromReg(Root, Reg, RegVT);
|
2007-02-28 06:10:12 +00:00
|
|
|
|
|
|
|
// If this is an 8 or 16-bit value, it is really passed promoted to 32
|
|
|
|
// bits. Insert an assert[sz]ext to capture this, then truncate to the
|
|
|
|
// right size.
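// Illustrative example (assumption): an i8 argument arriving sign-extended in
// a 32-bit register is modeled as
//   (truncate:i8 (AssertSext:i32 (CopyFromReg:i32 ...), ValueType:i8))
// so later combines can rely on the upper 24 bits being copies of the sign
// bit.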
|
|
|
|
if (VA.getLocInfo() == CCValAssign::SExt)
|
|
|
|
ArgValue = DAG.getNode(ISD::AssertSext, RegVT, ArgValue,
|
|
|
|
DAG.getValueType(VA.getValVT()));
|
|
|
|
else if (VA.getLocInfo() == CCValAssign::ZExt)
|
|
|
|
ArgValue = DAG.getNode(ISD::AssertZext, RegVT, ArgValue,
|
|
|
|
DAG.getValueType(VA.getValVT()));
|
|
|
|
|
|
|
|
if (VA.getLocInfo() != CCValAssign::Full)
|
|
|
|
ArgValue = DAG.getNode(ISD::TRUNCATE, VA.getValVT(), ArgValue);
|
|
|
|
|
2007-06-09 05:08:10 +00:00
|
|
|
// Handle MMX values passed in GPRs.
|
2008-04-25 20:13:28 +00:00
|
|
|
if (Is64Bit && RegVT != VA.getLocVT()) {
|
2008-06-06 12:08:01 +00:00
|
|
|
if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass)
|
2008-04-25 20:13:28 +00:00
|
|
|
ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
|
|
|
|
else if (RC == X86::VR128RegisterClass) {
|
|
|
|
ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i64, ArgValue,
|
|
|
|
DAG.getConstant(0, MVT::i64));
|
|
|
|
ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
|
|
|
|
}
|
|
|
|
}
|
2007-06-09 05:08:10 +00:00
|
|
|
|
2007-02-28 06:10:12 +00:00
|
|
|
ArgValues.push_back(ArgValue);
|
|
|
|
} else {
|
|
|
|
assert(VA.isMemLoc());
|
2008-02-26 09:19:59 +00:00
|
|
|
ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, CC, Root, i));
|
2007-02-28 06:10:12 +00:00
|
|
|
}
|
|
|
|
}
|
2008-01-03 16:47:34 +00:00
|
|
|
|
2008-04-21 23:59:07 +00:00
|
|
|
// The x86-64 ABI for returning structs by value requires that we copy
|
|
|
|
// the sret argument into %rax for the return. Save the argument into
|
|
|
|
// a virtual register so that we can access it from the return points.
|
|
|
|
if (Is64Bit && DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
|
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
|
|
|
|
unsigned Reg = FuncInfo->getSRetReturnReg();
|
|
|
|
if (!Reg) {
|
|
|
|
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
|
|
|
|
FuncInfo->setSRetReturnReg(Reg);
|
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), Reg, ArgValues[0]);
|
2008-04-21 23:59:07 +00:00
|
|
|
Root = DAG.getNode(ISD::TokenFactor, MVT::Other, Copy, Root);
|
|
|
|
}
|
|
|
|
|
2007-02-28 06:10:12 +00:00
|
|
|
unsigned StackSize = CCInfo.getNextStackOffset();
|
2008-01-03 16:47:34 +00:00
|
|
|
// Align stack specially for tail calls.
|
2008-09-07 09:07:23 +00:00
|
|
|
if (PerformTailCallOpt && CC == CallingConv::Fast)
|
2008-01-03 16:47:34 +00:00
|
|
|
StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
|
|
|
|
|
2007-02-28 06:10:12 +00:00
|
|
|
// If the function takes a variable number of arguments, make a frame index for
|
|
|
|
// the start of the first vararg value... for expansion of llvm.va_start.
|
|
|
|
if (isVarArg) {
|
2008-01-05 16:56:59 +00:00
|
|
|
if (Is64Bit || CC != CallingConv::X86_FastCall) {
|
|
|
|
VarArgsFrameIndex = MFI->CreateFixedObject(1, StackSize);
|
2006-04-27 01:32:22 +00:00
|
|
|
}
|
2008-01-05 16:56:59 +00:00
|
|
|
if (Is64Bit) {
|
2008-04-27 23:15:03 +00:00
|
|
|
unsigned TotalNumIntRegs = 0, TotalNumXMMRegs = 0;
|
|
|
|
|
|
|
|
// FIXME: We should really autogenerate these arrays
|
|
|
|
static const unsigned GPR64ArgRegsWin64[] = {
|
|
|
|
X86::RCX, X86::RDX, X86::R8, X86::R9
|
|
|
|
};
|
|
|
|
static const unsigned XMMArgRegsWin64[] = {
|
|
|
|
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3
|
2008-01-05 16:56:59 +00:00
|
|
|
};
|
2008-04-27 23:15:03 +00:00
|
|
|
static const unsigned GPR64ArgRegs64Bit[] = {
|
|
|
|
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
|
|
|
|
};
|
|
|
|
static const unsigned XMMArgRegs64Bit[] = {
|
2008-01-05 16:56:59 +00:00
|
|
|
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
|
|
|
|
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
|
|
|
|
};
|
2008-04-27 23:15:03 +00:00
|
|
|
const unsigned *GPR64ArgRegs, *XMMArgRegs;
|
|
|
|
|
|
|
|
if (IsWin64) {
|
|
|
|
TotalNumIntRegs = 4; TotalNumXMMRegs = 4;
|
|
|
|
GPR64ArgRegs = GPR64ArgRegsWin64;
|
|
|
|
XMMArgRegs = XMMArgRegsWin64;
|
|
|
|
} else {
|
|
|
|
TotalNumIntRegs = 6; TotalNumXMMRegs = 8;
|
|
|
|
GPR64ArgRegs = GPR64ArgRegs64Bit;
|
|
|
|
XMMArgRegs = XMMArgRegs64Bit;
|
|
|
|
}
|
|
|
|
unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
|
|
|
|
TotalNumIntRegs);
|
|
|
|
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs,
|
|
|
|
TotalNumXMMRegs);
|
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
// For X86-64, if there are vararg parameters that are passed via
|
|
|
|
// registers, then we must store them to their spots on the stack so they
|
|
|
|
// may be loaded by dereferencing the result of va_next.
|
|
|
|
VarArgsGPOffset = NumIntRegs * 8;
|
2008-04-27 23:15:03 +00:00
|
|
|
VarArgsFPOffset = TotalNumIntRegs * 8 + NumXMMRegs * 16;
|
|
|
|
RegSaveFrameIndex = MFI->CreateStackObject(TotalNumIntRegs * 8 +
|
|
|
|
TotalNumXMMRegs * 16, 16);
|
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
// Store the integer parameter registers.
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MemOps;
|
|
|
|
SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
|
|
|
|
SDValue FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
|
2008-01-17 07:00:52 +00:00
|
|
|
DAG.getIntPtrConstant(VarArgsGPOffset));
|
2008-04-27 23:15:03 +00:00
|
|
|
for (; NumIntRegs != TotalNumIntRegs; ++NumIntRegs) {
|
2008-01-05 16:56:59 +00:00
|
|
|
unsigned VReg = AddLiveIn(MF, GPR64ArgRegs[NumIntRegs],
|
|
|
|
X86::GR64RegisterClass);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::i64);
|
|
|
|
SDValue Store =
|
2008-02-06 22:27:42 +00:00
|
|
|
DAG.getStore(Val.getValue(1), Val, FIN,
|
2008-07-11 22:44:52 +00:00
|
|
|
PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
|
2008-01-05 16:56:59 +00:00
|
|
|
MemOps.push_back(Store);
|
|
|
|
FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
|
2008-01-17 07:00:52 +00:00
|
|
|
DAG.getIntPtrConstant(8));
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
2008-04-27 23:15:03 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
// Now store the XMM (fp + vector) parameter registers.
|
|
|
|
FIN = DAG.getNode(ISD::ADD, getPointerTy(), RSFIN,
|
2008-01-17 07:00:52 +00:00
|
|
|
DAG.getIntPtrConstant(VarArgsFPOffset));
|
2008-04-27 23:15:03 +00:00
|
|
|
for (; NumXMMRegs != TotalNumXMMRegs; ++NumXMMRegs) {
|
2008-01-05 16:56:59 +00:00
|
|
|
unsigned VReg = AddLiveIn(MF, XMMArgRegs[NumXMMRegs],
|
|
|
|
X86::VR128RegisterClass);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Val = DAG.getCopyFromReg(Root, VReg, MVT::v4f32);
|
|
|
|
SDValue Store =
|
2008-02-06 22:27:42 +00:00
|
|
|
DAG.getStore(Val.getValue(1), Val, FIN,
|
2008-07-11 22:44:52 +00:00
|
|
|
PseudoSourceValue::getFixedStack(RegSaveFrameIndex), 0);
|
2008-01-05 16:56:59 +00:00
|
|
|
MemOps.push_back(Store);
|
|
|
|
FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN,
|
2008-01-17 07:00:52 +00:00
|
|
|
DAG.getIntPtrConstant(16));
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
|
|
|
if (!MemOps.empty())
|
|
|
|
Root = DAG.getNode(ISD::TokenFactor, MVT::Other,
|
|
|
|
&MemOps[0], MemOps.size());
|
2007-02-28 06:10:12 +00:00
|
|
|
}
|
2006-04-26 01:20:17 +00:00
|
|
|
}
|
2008-01-05 16:56:59 +00:00
|
|
|
|
2006-05-23 21:06:34 +00:00
|
|
|
ArgValues.push_back(Root);
|
2008-01-03 16:47:34 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
// Some CCs need callee pop.
|
2008-09-13 01:54:27 +00:00
|
|
|
if (IsCalleePop(isVarArg, CC)) {
|
2008-01-03 16:47:34 +00:00
|
|
|
BytesToPopOnReturn = StackSize; // Callee pops everything.
|
2007-10-11 19:40:01 +00:00
|
|
|
BytesCallerReserves = 0;
|
|
|
|
} else {
|
2008-01-03 16:47:34 +00:00
|
|
|
BytesToPopOnReturn = 0; // Callee pops nothing.
|
2008-01-05 16:56:59 +00:00
|
|
|
// If this is an sret function, the return should pop the hidden pointer.
|
2008-09-10 18:25:29 +00:00
|
|
|
if (!Is64Bit && CC != CallingConv::Fast && ArgsAreStructReturn(Op))
|
2008-01-05 16:56:59 +00:00
|
|
|
BytesToPopOnReturn = 4;
|
2007-10-11 19:40:01 +00:00
|
|
|
BytesCallerReserves = StackSize;
|
|
|
|
}
|
2008-01-05 16:56:59 +00:00
|
|
|
|
|
|
|
if (!Is64Bit) {
|
|
|
|
RegSaveFrameIndex = 0xAAAAAAA; // RegSaveFrameIndex is X86-64 only.
|
|
|
|
if (CC == CallingConv::X86_FastCall)
|
|
|
|
VarArgsFrameIndex = 0xAAAAAAA; // fastcall functions can't have varargs.
|
|
|
|
}
|
|
|
|
|
2007-08-15 17:12:32 +00:00
|
|
|
FuncInfo->setBytesToPopOnReturn(BytesToPopOnReturn);
|
|
|
|
|
2006-05-23 21:06:34 +00:00
|
|
|
// Return the new list of results.
|
2008-08-28 21:40:38 +00:00
|
|
|
return DAG.getMergeValues(Op.getNode()->getVTList(), &ArgValues[0],
|
2008-08-26 22:36:50 +00:00
|
|
|
ArgValues.size()).getValue(Op.getResNo());
|
2005-11-15 00:40:23 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
2008-09-13 01:54:27 +00:00
|
|
|
X86TargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
|
2008-07-27 21:46:04 +00:00
|
|
|
const SDValue &StackPtr,
|
2008-01-10 00:09:10 +00:00
|
|
|
const CCValAssign &VA,
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Chain,
|
2008-09-13 01:54:27 +00:00
|
|
|
SDValue Arg, ISD::ArgFlagsTy Flags) {
|
2008-02-07 16:28:05 +00:00
|
|
|
unsigned LocMemOffset = VA.getLocMemOffset();
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
|
2008-01-10 00:09:10 +00:00
|
|
|
PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
|
2008-03-21 09:14:45 +00:00
|
|
|
if (Flags.isByVal()) {
|
2008-01-12 01:08:07 +00:00
|
|
|
return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG);
|
2008-01-10 00:09:10 +00:00
|
|
|
}
|
2008-02-07 16:28:05 +00:00
|
|
|
return DAG.getStore(Chain, Arg, PtrOff,
|
2008-02-07 18:41:25 +00:00
|
|
|
PseudoSourceValue::getStack(), LocMemOffset);
|
2008-01-10 00:09:10 +00:00
|
|
|
}
|
|
|
|
|
2008-04-12 18:11:06 +00:00
|
|
|
/// EmitTailCallLoadRetAddr - Emit a load of the return address if tail call
|
|
|
|
/// optimization is performed and it is required.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
2008-04-12 18:11:06 +00:00
|
|
|
X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue &OutRetAddr,
|
|
|
|
SDValue Chain,
|
2008-04-12 18:11:06 +00:00
|
|
|
bool IsTailCall,
|
|
|
|
bool Is64Bit,
|
|
|
|
int FPDiff) {
|
|
|
|
if (!IsTailCall || FPDiff==0) return Chain;
|
|
|
|
|
|
|
|
// Adjust the Return address stack slot.
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = getPointerTy();
|
2008-04-12 18:11:06 +00:00
|
|
|
OutRetAddr = getReturnAddressFrameIndex(DAG);
|
|
|
|
// Load the "old" Return address.
|
|
|
|
OutRetAddr = DAG.getLoad(VT, Chain,OutRetAddr, NULL, 0);
|
2008-08-28 21:40:38 +00:00
|
|
|
return SDValue(OutRetAddr.getNode(), 1);
|
2008-04-12 18:11:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call
|
|
|
|
/// optimization is performed and it is required (FPDiff!=0).
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue
|
2008-04-12 18:11:06 +00:00
|
|
|
EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF,
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Chain, SDValue RetAddrFrIdx,
|
2008-04-12 18:11:06 +00:00
|
|
|
bool Is64Bit, int FPDiff) {
|
|
|
|
// Store the return address to the appropriate stack slot.
|
|
|
|
if (!FPDiff) return Chain;
|
|
|
|
// Calculate the new stack slot for the return address.
|
|
|
|
int SlotSize = Is64Bit ? 8 : 4;
|
|
|
|
int NewReturnAddrFI =
|
|
|
|
MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Is64Bit ? MVT::i64 : MVT::i32;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
|
2008-04-12 18:11:06 +00:00
|
|
|
Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx,
|
2008-07-11 22:44:52 +00:00
|
|
|
PseudoSourceValue::getFixedStack(NewReturnAddrFI), 0);
|
2008-04-12 18:11:06 +00:00
|
|
|
return Chain;
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
|
2008-01-05 16:56:59 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
2008-09-13 01:54:27 +00:00
|
|
|
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
|
|
|
|
SDValue Chain = TheCall->getChain();
|
|
|
|
unsigned CC = TheCall->getCallingConv();
|
|
|
|
bool isVarArg = TheCall->isVarArg();
|
|
|
|
bool IsTailCall = TheCall->isTailCall() &&
|
|
|
|
CC == CallingConv::Fast && PerformTailCallOpt;
|
|
|
|
SDValue Callee = TheCall->getCallee();
|
2008-01-05 16:56:59 +00:00
|
|
|
bool Is64Bit = Subtarget->is64Bit();
|
2008-09-13 01:54:27 +00:00
|
|
|
bool IsStructRet = CallIsStructReturn(TheCall);
|
2008-01-03 16:47:34 +00:00
|
|
|
|
|
|
|
assert(!(isVarArg && CC == CallingConv::Fast) &&
|
|
|
|
"Var args not supported with calling convention fastcc");
|
|
|
|
|
2007-02-28 07:00:42 +00:00
|
|
|
// Analyze operands of the call, assigning locations to each operand.
|
2007-02-28 06:10:12 +00:00
|
|
|
SmallVector<CCValAssign, 16> ArgLocs;
|
2007-06-19 00:13:10 +00:00
|
|
|
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
|
2008-09-13 01:54:27 +00:00
|
|
|
CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC));
|
2008-01-03 16:47:34 +00:00
|
|
|
|
2007-02-28 06:10:12 +00:00
|
|
|
// Get a count of how many bytes are to be pushed on the stack.
|
|
|
|
unsigned NumBytes = CCInfo.getNextStackOffset();
|
2008-09-11 20:28:43 +00:00
|
|
|
if (PerformTailCallOpt && CC == CallingConv::Fast)
|
2008-01-03 16:47:34 +00:00
|
|
|
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
|
2007-10-11 19:40:01 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
int FPDiff = 0;
|
|
|
|
if (IsTailCall) {
|
|
|
|
// Lower arguments at fp - stackoffset + fpdiff.
|
|
|
|
unsigned NumBytesCallerPushed =
|
|
|
|
MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn();
|
|
|
|
FPDiff = NumBytesCallerPushed - NumBytes;
|
|
|
|
|
|
|
|
// Set the delta of movement of the return address stack slot.
|
|
|
|
// But only set if delta is greater than previous delta.
|
|
|
|
if (FPDiff < (MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta()))
|
|
|
|
MF.getInfo<X86MachineFunctionInfo>()->setTCReturnAddrDelta(FPDiff);
|
|
|
|
}
|
|
|
|
|
2008-10-11 22:08:30 +00:00
|
|
|
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
|
2006-09-20 22:03:51 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue RetAddrFrIdx;
|
2008-04-12 18:11:06 +00:00
|
|
|
// Load the return address for tail calls.
|
|
|
|
Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, IsTailCall, Is64Bit,
|
|
|
|
FPDiff);
|
2008-01-05 16:56:59 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
|
|
|
|
SmallVector<SDValue, 8> MemOpChains;
|
|
|
|
SDValue StackPtr;
|
2008-01-03 16:47:34 +00:00
|
|
|
|
2008-04-30 09:16:33 +00:00
|
|
|
// Walk the register/memloc assignments, inserting copies/loads. In the case
|
|
|
|
// of tail call optimization, arguments are handled later.
|
2007-02-28 06:10:12 +00:00
|
|
|
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
|
|
|
|
CCValAssign &VA = ArgLocs[i];
|
2008-09-13 01:54:27 +00:00
|
|
|
SDValue Arg = TheCall->getArg(i);
|
|
|
|
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
|
|
|
|
bool isByVal = Flags.isByVal();
|
2008-04-12 18:11:06 +00:00
|
|
|
|
2007-02-28 06:10:12 +00:00
|
|
|
// Promote the value if needed.
|
|
|
|
switch (VA.getLocInfo()) {
|
|
|
|
default: assert(0 && "Unknown loc info!");
|
|
|
|
case CCValAssign::Full: break;
|
|
|
|
case CCValAssign::SExt:
|
|
|
|
Arg = DAG.getNode(ISD::SIGN_EXTEND, VA.getLocVT(), Arg);
|
|
|
|
break;
|
|
|
|
case CCValAssign::ZExt:
|
|
|
|
Arg = DAG.getNode(ISD::ZERO_EXTEND, VA.getLocVT(), Arg);
|
|
|
|
break;
|
|
|
|
case CCValAssign::AExt:
|
|
|
|
Arg = DAG.getNode(ISD::ANY_EXTEND, VA.getLocVT(), Arg);
|
2006-09-20 22:03:51 +00:00
|
|
|
break;
|
|
|
|
}
|
2007-02-28 06:10:12 +00:00
|
|
|
|
|
|
|
if (VA.isRegLoc()) {
|
2008-04-25 19:11:04 +00:00
|
|
|
if (Is64Bit) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT RegVT = VA.getLocVT();
|
|
|
|
if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
|
2008-04-25 19:11:04 +00:00
|
|
|
switch (VA.getLocReg()) {
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX:
|
|
|
|
case X86::R8: {
|
|
|
|
// Special case: passing MMX values in GPR registers.
|
|
|
|
Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
|
|
|
|
case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: {
|
|
|
|
// Special case: passing MMX values in XMM registers.
|
|
|
|
Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg);
|
|
|
|
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Arg);
|
|
|
|
Arg = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
|
|
|
|
DAG.getNode(ISD::UNDEF, MVT::v2i64), Arg,
|
|
|
|
getMOVLMask(2, DAG));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2007-02-28 06:10:12 +00:00
|
|
|
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
|
|
|
|
} else {
|
2008-04-12 18:11:06 +00:00
|
|
|
if (!IsTailCall || (IsTailCall && isByVal)) {
|
2008-01-11 16:49:42 +00:00
|
|
|
assert(VA.isMemLoc());
|
2008-08-28 21:40:38 +00:00
|
|
|
if (StackPtr.getNode() == 0)
|
2008-01-11 16:49:42 +00:00
|
|
|
StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
|
|
|
|
|
2008-09-13 01:54:27 +00:00
|
|
|
MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
|
|
|
|
Chain, Arg, Flags));
|
2008-01-11 16:49:42 +00:00
|
|
|
}
|
2006-09-20 22:03:51 +00:00
|
|
|
}
|
|
|
|
}
|
2007-02-28 06:10:12 +00:00
|
|
|
|
2006-09-20 22:03:51 +00:00
|
|
|
if (!MemOpChains.empty())
|
|
|
|
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
|
|
|
|
&MemOpChains[0], MemOpChains.size());
|
|
|
|
|
2007-01-28 13:31:35 +00:00
|
|
|
// Build a sequence of copy-to-reg nodes chained together with token chain
|
|
|
|
// and flag operands which copy the outgoing args into registers.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue InFlag;
|
2008-04-30 09:16:33 +00:00
|
|
|
// Tail call byval lowering might overwrite argument registers, so in case of
|
|
|
|
// tail call optimization the copies to registers are lowered later.
|
|
|
|
if (!IsTailCall)
|
|
|
|
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
|
|
|
|
Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
|
|
|
|
InFlag);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
}
|
2007-01-28 13:31:35 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
// ELF / PIC requires the GOT pointer to be in the EBX register before
|
2008-02-26 10:21:54 +00:00
|
|
|
// function calls made via the PLT.
|
2008-02-26 22:21:54 +00:00
|
|
|
if (CallRequiresGOTPtrInReg(Is64Bit, IsTailCall)) {
|
|
|
|
Chain = DAG.getCopyToReg(Chain, X86::EBX,
|
|
|
|
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
|
|
|
|
InFlag);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
}
|
2008-02-26 10:21:54 +00:00
|
|
|
// If we are tail calling and generating PIC/GOT style code, load the address
|
|
|
|
// of the callee into ecx. The value in ecx is used as target of the tail
|
|
|
|
// jump. This is done to circumvent the ebx/callee-saved problem for tail
|
|
|
|
// calls on PIC/GOT architectures. Normally we would just put the address of
|
|
|
|
// GOT into ebx and then call target@PLT. But for tail calls ebx would be
|
|
|
|
// restored (since ebx is callee saved) before jumping to the target@PLT.
|
2008-02-26 22:21:54 +00:00
|
|
|
if (CallRequiresFnAddressInReg(Is64Bit, IsTailCall)) {
|
2008-02-26 10:21:54 +00:00
|
|
|
// Note: The actual moving to ecx is done further down.
|
|
|
|
GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
|
2008-09-24 00:05:32 +00:00
|
|
|
if (G && !G->getGlobal()->hasHiddenVisibility() &&
|
2008-02-26 10:21:54 +00:00
|
|
|
!G->getGlobal()->hasProtectedVisibility())
|
|
|
|
Callee = LowerGlobalAddress(Callee, DAG);
|
2008-09-16 21:48:12 +00:00
|
|
|
else if (isa<ExternalSymbolSDNode>(Callee))
|
|
|
|
Callee = LowerExternalSymbol(Callee,DAG);
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (Is64Bit && isVarArg) {
|
2007-02-28 06:10:12 +00:00
|
|
|
// From AMD64 ABI document:
|
|
|
|
// For calls that may call functions that use varargs or stdargs
|
|
|
|
// (prototype-less calls or calls to functions containing ellipsis (...) in
|
|
|
|
// the declaration) %al is used as hidden argument to specify the number
|
|
|
|
// of SSE registers used. The contents of %al do not need to match exactly
|
|
|
|
// the number of registers, but must be an upper bound on the number of SSE
|
|
|
|
// registers used and is in the range 0 - 8 inclusive.
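// Illustrative example (assumption): for a varargs call like
//   printf("%f %f\n", x, y)
// with both doubles passed in %xmm0 and %xmm1, NumXMMRegs below is 2, so the
// constant 2 is moved into %al before the call.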
|
2008-04-27 23:15:03 +00:00
|
|
|
|
|
|
|
// FIXME: Verify this on Win64
|
2007-02-28 06:10:12 +00:00
|
|
|
// Count the number of XMM registers allocated.
|
|
|
|
static const unsigned XMMArgRegs[] = {
|
|
|
|
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
|
|
|
|
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
|
|
|
|
};
|
|
|
|
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
|
|
|
|
|
|
|
|
Chain = DAG.getCopyToReg(Chain, X86::AL,
|
|
|
|
DAG.getConstant(NumXMMRegs, MVT::i8), InFlag);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
}
|
|
|
|
|
2008-02-26 09:19:59 +00:00
|
|
|
|
2008-01-11 16:49:42 +00:00
|
|
|
// For tail calls lower the arguments to the 'real' stack slot.
|
2008-01-05 16:56:59 +00:00
|
|
|
if (IsTailCall) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MemOpChains2;
|
|
|
|
SDValue FIN;
|
2008-01-05 16:56:59 +00:00
|
|
|
int FI = 0;
|
2008-02-26 09:19:59 +00:00
|
|
|
// Do not flag preceding copytoreg stuff together with the following stuff.
|
2008-07-27 21:46:04 +00:00
|
|
|
InFlag = SDValue();
|
2008-01-05 16:56:59 +00:00
|
|
|
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
|
|
|
|
CCValAssign &VA = ArgLocs[i];
|
|
|
|
if (!VA.isRegLoc()) {
|
2008-01-11 16:49:42 +00:00
|
|
|
assert(VA.isMemLoc());
|
2008-09-13 01:54:27 +00:00
|
|
|
SDValue Arg = TheCall->getArg(i);
|
|
|
|
ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
|
2008-01-05 16:56:59 +00:00
|
|
|
// Create frame index.
|
|
|
|
int32_t Offset = VA.getLocMemOffset()+FPDiff;
|
2008-06-06 12:08:01 +00:00
|
|
|
uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
|
2008-01-05 16:56:59 +00:00
|
|
|
FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
|
2008-04-12 18:11:06 +00:00
|
|
|
FIN = DAG.getFrameIndex(FI, getPointerTy());
|
2008-01-11 16:49:42 +00:00
|
|
|
|
2008-03-21 09:14:45 +00:00
|
|
|
if (Flags.isByVal()) {
|
2008-01-12 01:08:07 +00:00
|
|
|
// Copy relative to framepointer.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
|
2008-08-28 21:40:38 +00:00
|
|
|
if (StackPtr.getNode() == 0)
|
2008-04-12 18:11:06 +00:00
|
|
|
StackPtr = DAG.getCopyFromReg(Chain, X86StackPtr, getPointerTy());
|
|
|
|
Source = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, Source);
|
|
|
|
|
|
|
|
MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, Chain,
|
2008-01-12 01:08:07 +00:00
|
|
|
Flags, DAG));
|
2008-01-05 16:56:59 +00:00
|
|
|
} else {
|
2008-01-12 01:08:07 +00:00
|
|
|
// Store relative to framepointer.
|
2008-02-06 22:27:42 +00:00
|
|
|
MemOpChains2.push_back(
|
2008-02-26 09:19:59 +00:00
|
|
|
DAG.getStore(Chain, Arg, FIN,
|
2008-07-11 22:44:52 +00:00
|
|
|
PseudoSourceValue::getFixedStack(FI), 0));
|
2008-01-11 16:49:42 +00:00
|
|
|
}
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!MemOpChains2.empty())
|
|
|
|
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
|
2008-01-11 14:34:56 +00:00
|
|
|
&MemOpChains2[0], MemOpChains2.size());
|
2008-01-05 16:56:59 +00:00
|
|
|
|
2008-04-30 09:16:33 +00:00
|
|
|
// Copy arguments to their registers.
|
|
|
|
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
|
|
|
|
Chain = DAG.getCopyToReg(Chain, RegsToPass[i].first, RegsToPass[i].second,
|
|
|
|
InFlag);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
InFlag = SDValue();
|
2008-04-12 18:11:06 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
// Store the return address to the appropriate stack slot.
|
2008-04-12 18:11:06 +00:00
|
|
|
Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit,
|
|
|
|
FPDiff);
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
|
|
|
|
2006-09-20 22:03:51 +00:00
|
|
|
// If the callee is a GlobalAddress node (quite common, every direct call is)
|
|
|
|
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
|
2006-11-20 10:46:14 +00:00
|
|
|
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
|
2006-12-22 22:29:05 +00:00
|
|
|
// We should use extra load for direct calls to dllimported functions in
|
|
|
|
// non-JIT mode.
|
2008-07-16 01:34:02 +00:00
|
|
|
if (!Subtarget->GVRequiresExtraLoad(G->getGlobal(),
|
|
|
|
getTargetMachine(), true))
|
2008-10-18 02:06:02 +00:00
|
|
|
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), getPointerTy(),
|
|
|
|
G->getOffset());
|
2008-09-16 21:48:12 +00:00
|
|
|
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
|
|
|
|
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
|
2008-01-05 16:56:59 +00:00
|
|
|
} else if (IsTailCall) {
|
2008-09-22 14:50:07 +00:00
|
|
|
unsigned Opc = Is64Bit ? X86::R9 : X86::EAX;
|
2008-01-05 16:56:59 +00:00
|
|
|
|
|
|
|
Chain = DAG.getCopyToReg(Chain,
|
2008-02-26 10:21:54 +00:00
|
|
|
DAG.getRegister(Opc, getPointerTy()),
|
2008-01-05 16:56:59 +00:00
|
|
|
Callee, InFlag);
|
|
|
|
Callee = DAG.getRegister(Opc, getPointerTy());
|
|
|
|
// Add register as live out.
|
|
|
|
DAG.getMachineFunction().getRegInfo().addLiveOut(Opc);
|
2008-01-03 16:47:34 +00:00
|
|
|
}
|
|
|
|
|
2007-02-25 06:40:16 +00:00
|
|
|
// Returns a chain & a flag for retval copy to use.
|
|
|
|
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> Ops;
|
2008-01-05 16:56:59 +00:00
|
|
|
|
|
|
|
if (IsTailCall) {
|
|
|
|
Ops.push_back(Chain);
|
2008-10-11 22:08:30 +00:00
|
|
|
Ops.push_back(DAG.getIntPtrConstant(NumBytes, true));
|
|
|
|
Ops.push_back(DAG.getIntPtrConstant(0, true));
|
2008-08-28 21:40:38 +00:00
|
|
|
if (InFlag.getNode())
|
2008-01-05 16:56:59 +00:00
|
|
|
Ops.push_back(InFlag);
|
|
|
|
Chain = DAG.getNode(ISD::CALLSEQ_END, NodeTys, &Ops[0], Ops.size());
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
|
|
|
|
// Returns a chain & a flag for retval copy to use.
|
|
|
|
NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
|
|
|
|
Ops.clear();
|
|
|
|
}
|
|
|
|
|
2006-09-20 22:03:51 +00:00
|
|
|
Ops.push_back(Chain);
|
|
|
|
Ops.push_back(Callee);
|
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
if (IsTailCall)
|
|
|
|
Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
|
|
|
|
|
2007-01-28 13:31:35 +00:00
|
|
|
// Add argument registers to the end of the list so that they are known live
|
|
|
|
// into the call.
|
2008-01-07 23:08:23 +00:00
|
|
|
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
|
|
|
|
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
|
|
|
|
RegsToPass[i].second.getValueType()));
|
2008-01-05 16:56:59 +00:00
|
|
|
|
2008-03-18 23:36:35 +00:00
|
|
|
// Add an implicit use GOT pointer in EBX.
|
|
|
|
if (!IsTailCall && !Is64Bit &&
|
|
|
|
getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
|
|
|
|
Subtarget->isPICStyleGOT())
|
|
|
|
Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy()));
|
|
|
|
|
|
|
|
// Add an implicit use of AL for x86 vararg functions.
|
|
|
|
if (Is64Bit && isVarArg)
|
|
|
|
Ops.push_back(DAG.getRegister(X86::AL, MVT::i8));
|
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (InFlag.getNode())
|
2007-01-28 13:31:35 +00:00
|
|
|
Ops.push_back(InFlag);
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
if (IsTailCall) {
|
2008-08-28 21:40:38 +00:00
|
|
|
assert(InFlag.getNode() &&
|
2008-01-05 16:56:59 +00:00
|
|
|
"Flag must be set. Depend on flag being set in LowerRET");
|
|
|
|
Chain = DAG.getNode(X86ISD::TAILCALL,
|
2008-09-13 01:54:27 +00:00
|
|
|
TheCall->getVTList(), &Ops[0], Ops.size());
|
2008-01-05 16:56:59 +00:00
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
return SDValue(Chain.getNode(), Op.getResNo());
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
|
|
|
|
2008-01-03 16:47:34 +00:00
|
|
|
Chain = DAG.getNode(X86ISD::CALL, NodeTys, &Ops[0], Ops.size());
|
2007-01-28 13:31:35 +00:00
|
|
|
InFlag = Chain.getValue(1);
|
2008-01-03 16:47:34 +00:00
|
|
|
|
|
|
|
// Create the CALLSEQ_END node.
|
2008-01-05 16:56:59 +00:00
|
|
|
unsigned NumBytesForCalleeToPush;
|
2008-09-13 01:54:27 +00:00
|
|
|
if (IsCalleePop(isVarArg, CC))
|
2008-01-05 16:56:59 +00:00
|
|
|
NumBytesForCalleeToPush = NumBytes; // Callee pops everything
|
2008-09-10 18:25:29 +00:00
|
|
|
else if (!Is64Bit && CC != CallingConv::Fast && IsStructRet)
|
2008-01-05 16:56:59 +00:00
|
|
|
// If this is a call to a struct-return function, the callee
|
|
|
|
// pops the hidden struct pointer, so we have to push it back.
|
|
|
|
// This is common for Darwin/X86, Linux & Mingw32 targets.
|
|
|
|
NumBytesForCalleeToPush = 4;
|
|
|
|
else
|
2007-10-11 19:40:01 +00:00
|
|
|
NumBytesForCalleeToPush = 0; // Callee pops nothing.
|
2008-01-05 16:56:59 +00:00
|
|
|
|
2007-02-25 07:18:38 +00:00
|
|
|
// Returns a flag for retval copy to use.
|
2008-01-03 16:47:34 +00:00
|
|
|
Chain = DAG.getCALLSEQ_END(Chain,
|
2008-10-11 22:08:30 +00:00
|
|
|
DAG.getIntPtrConstant(NumBytes, true),
|
|
|
|
DAG.getIntPtrConstant(NumBytesForCalleeToPush,
|
|
|
|
true),
|
2008-01-03 16:47:34 +00:00
|
|
|
InFlag);
|
2007-02-25 09:10:05 +00:00
|
|
|
InFlag = Chain.getValue(1);
|
2008-01-03 16:47:34 +00:00
|
|
|
|
2007-02-25 09:10:05 +00:00
|
|
|
// Handle result values, copying them out of physregs into vregs that we
|
|
|
|
// return.
|
2008-09-13 01:54:27 +00:00
|
|
|
return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG),
|
2008-08-28 23:19:51 +00:00
|
|
|
Op.getResNo());
|
2006-09-20 22:03:51 +00:00
|
|
|
}
|
|
|
|
|
2007-02-28 06:10:12 +00:00
|
|
|
|
2008-01-05 16:56:59 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Fast Calling Convention (tail call) implementation
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// Like the stdcall convention, the callee cleans up the arguments, except that ECX is
|
|
|
|
// reserved for storing the tail called function address. Only 2 registers are
|
|
|
|
// free for argument passing (inreg). Tail call optimization is performed
|
|
|
|
// provided:
|
|
|
|
// * tailcallopt is enabled
|
|
|
|
// * caller/callee are fastcc
|
2008-02-26 10:21:54 +00:00
|
|
|
// On X86_64 architecture with GOT-style position independent code only local
|
|
|
|
// (within module) calls are supported at the moment.
|
2008-01-05 16:56:59 +00:00
|
|
|
// To keep the stack aligned according to the platform ABI, the function
|
|
|
|
// GetAlignedArgumentStackSize ensures that the argument delta is always a multiple
|
|
|
|
// of stack alignment. (Dynamic linkers need this - darwin's dyld for example)
|
|
|
|
// If a tail called function callee has more arguments than the caller the
|
|
|
|
// caller needs to make sure that there is room to move the RETADDR to. This is
|
|
|
|
// achieved by reserving an area the size of the argument delta right after the
|
|
|
|
// original RETADDR, but before the saved framepointer or the spilled registers
|
|
|
|
// e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
|
|
|
|
// stack layout:
|
|
|
|
// arg1
|
|
|
|
// arg2
|
|
|
|
// RETADDR
|
|
|
|
// [ new RETADDR
|
|
|
|
// move area ]
|
|
|
|
// (possible EBP)
|
|
|
|
// ESI
|
|
|
|
// EDI
|
|
|
|
// local1 ..
|
|
|
|
|
|
|
|
/// GetAlignedArgumentStackSize - Make the stack size aligned, e.g. to 16n + 12,
|
|
|
|
/// for a 16-byte alignment requirement.
|
|
|
|
unsigned X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
|
|
|
|
SelectionDAG& DAG) {
|
2008-09-07 09:07:23 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
const TargetMachine &TM = MF.getTarget();
|
|
|
|
const TargetFrameInfo &TFI = *TM.getFrameInfo();
|
|
|
|
unsigned StackAlignment = TFI.getStackAlignment();
|
|
|
|
uint64_t AlignMask = StackAlignment - 1;
|
|
|
|
int64_t Offset = StackSize;
|
2008-09-09 18:22:57 +00:00
|
|
|
uint64_t SlotSize = TD->getPointerSize();
|
2008-09-07 09:07:23 +00:00
|
|
|
if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
|
|
|
|
// Low bits are at most StackAlignment - SlotSize, so just add the difference.
|
|
|
|
Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
|
|
|
|
} else {
|
|
|
|
// Mask out the lower bits, then add StackAlignment once plus (StackAlignment - SlotSize) bytes.
|
|
|
|
Offset = ((~AlignMask) & Offset) + StackAlignment +
|
|
|
|
(StackAlignment-SlotSize);
|
2008-01-05 16:56:59 +00:00
|
|
|
}
|
2008-09-07 09:07:23 +00:00
|
|
|
return Offset;
|
2008-01-05 16:56:59 +00:00
|
|
|
}
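// Illustrative sketch, not part of the lowering code: the rounding rule used
// by GetAlignedArgumentStackSize above, restated on plain integers under the
// assumption of a 16-byte stack alignment and 4-byte stack slots, so every
// result lands on 16n + 12. The helper names and sample values below are
// hypothetical and exist only to demonstrate the arithmetic.
#include <cassert>
#include <cstdint>

static uint64_t AlignedArgStackSizeSketch(uint64_t StackSize) {
  const uint64_t StackAlignment = 16, SlotSize = 4;
  const uint64_t AlignMask = StackAlignment - 1;
  uint64_t Offset = StackSize;
  if ((Offset & AlignMask) <= (StackAlignment - SlotSize)) {
    // Low bits already fit below the return-address slot; just top up.
    Offset += (StackAlignment - SlotSize) - (Offset & AlignMask);
  } else {
    // Round down to the alignment boundary, then go one full alignment up
    // and leave room for the return-address slot.
    Offset = (Offset & ~AlignMask) + StackAlignment + (StackAlignment - SlotSize);
  }
  return Offset;
}

static void AlignedArgStackSizeSketchExamples() {
  assert(AlignedArgStackSizeSketch(20) == 28); // 28 == 16*1 + 12
  assert(AlignedArgStackSizeSketch(28) == 28); // already of the form 16n + 12
  assert(AlignedArgStackSizeSketch(30) == 44); // 44 == 16*2 + 12
}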
|
|
|
|
|
|
|
|
/// IsEligibleForTailCallOptimization - Check to see whether the next instruction
|
|
|
|
/// following the call is a return. A function is eligible if caller/callee
|
|
|
|
/// calling conventions match, currently only fastcc supports tail calls, and
|
|
|
|
/// the function CALL is immediately followed by a RET.
|
2008-09-13 01:54:27 +00:00
|
|
|
bool X86TargetLowering::IsEligibleForTailCallOptimization(CallSDNode *TheCall,
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ret,
|
2008-01-05 16:56:59 +00:00
|
|
|
SelectionDAG& DAG) const {
|
|
|
|
if (!PerformTailCallOpt)
|
|
|
|
return false;
|
|
|
|
|
2008-09-13 01:54:27 +00:00
|
|
|
if (CheckTailCallReturnConstraints(TheCall, Ret)) {
|
2008-01-05 16:56:59 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
unsigned CallerCC = MF.getFunction()->getCallingConv();
|
2008-09-13 01:54:27 +00:00
|
|
|
unsigned CalleeCC= TheCall->getCallingConv();
|
2008-01-05 16:56:59 +00:00
|
|
|
if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
|
2008-09-13 01:54:27 +00:00
|
|
|
SDValue Callee = TheCall->getCallee();
|
2008-02-26 10:21:54 +00:00
|
|
|
// On x86/32Bit PIC/GOT tail calls are supported.
|
2008-01-05 16:56:59 +00:00
|
|
|
if (getTargetMachine().getRelocationModel() != Reloc::PIC_ ||
|
2008-02-26 10:21:54 +00:00
|
|
|
!Subtarget->isPICStyleGOT() || !Subtarget->is64Bit())
|
2008-01-05 16:56:59 +00:00
|
|
|
return true;
|
|
|
|
|
2008-02-26 10:21:54 +00:00
|
|
|
// Can only do local tail calls (in same module, hidden or protected) on
|
|
|
|
// x86_64 PIC/GOT at the moment.
|
2008-01-05 16:56:59 +00:00
|
|
|
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
|
|
|
|
return G->getGlobal()->hasHiddenVisibility()
|
|
|
|
|| G->getGlobal()->hasProtectedVisibility();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2008-09-03 23:12:08 +00:00
|
|
|
FastISel *
|
|
|
|
X86TargetLowering::createFastISel(MachineFunction &mf,
|
2008-09-23 21:53:34 +00:00
|
|
|
MachineModuleInfo *mmo,
|
2008-09-03 23:12:08 +00:00
|
|
|
DenseMap<const Value *, unsigned> &vm,
|
|
|
|
DenseMap<const BasicBlock *,
|
2008-09-10 20:11:02 +00:00
|
|
|
MachineBasicBlock *> &bm,
|
2008-10-14 23:54:11 +00:00
|
|
|
DenseMap<const AllocaInst *, int> &am
|
|
|
|
#ifndef NDEBUG
|
|
|
|
, SmallSet<Instruction*, 8> &cil
|
|
|
|
#endif
|
|
|
|
) {
|
|
|
|
return X86::createFastISel(mf, mmo, vm, bm, am
|
|
|
|
#ifndef NDEBUG
|
|
|
|
, cil
|
|
|
|
#endif
|
|
|
|
);
|
2008-08-19 21:32:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-02-28 06:10:12 +00:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Other Lowering Hooks
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) {
|
2007-08-15 17:12:32 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
|
|
|
|
int ReturnAddrIndex = FuncInfo->getRAIndex();
|
2008-09-09 18:22:57 +00:00
|
|
|
uint64_t SlotSize = TD->getPointerSize();
|
2007-08-15 17:12:32 +00:00
|
|
|
|
2005-11-15 00:40:23 +00:00
|
|
|
if (ReturnAddrIndex == 0) {
|
|
|
|
// Set up a frame object for the return address.
|
2008-09-09 18:22:57 +00:00
|
|
|
ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize);
|
2007-08-15 17:12:32 +00:00
|
|
|
FuncInfo->setRAIndex(ReturnAddrIndex);
|
2005-11-15 00:40:23 +00:00
|
|
|
}
|
|
|
|
|
2006-09-08 06:48:29 +00:00
|
|
|
return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
|
2005-11-15 00:40:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-01-30 23:41:35 +00:00
|
|
|
/// translateX86CC - do a one to one translation of a ISD::CondCode to the X86
|
|
|
|
/// specific condition code. It returns false if it cannot do a direct
|
2006-09-13 03:22:10 +00:00
|
|
|
/// translation. X86CC is the translated CondCode. LHS/RHS are modified as
|
|
|
|
/// needed.
|
2006-04-05 23:38:46 +00:00
|
|
|
static bool translateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
|
2008-07-27 21:46:04 +00:00
|
|
|
unsigned &X86CC, SDValue &LHS, SDValue &RHS,
|
2006-09-13 03:22:10 +00:00
|
|
|
SelectionDAG &DAG) {
|
2006-10-20 17:42:20 +00:00
|
|
|
X86CC = X86::COND_INVALID;
|
2006-01-06 00:43:03 +00:00
|
|
|
if (!isFP) {
|
2006-09-13 17:04:54 +00:00
|
|
|
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
|
|
|
|
if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
|
|
|
|
// X > -1  -> compare X against 0 instead and jump if not sign.
|
|
|
|
RHS = DAG.getConstant(0, RHS.getValueType());
|
2006-10-20 17:42:20 +00:00
|
|
|
X86CC = X86::COND_NS;
|
2006-09-13 17:04:54 +00:00
|
|
|
return true;
|
|
|
|
} else if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
|
|
|
|
// X < 0  -> compare X against 0 and jump on sign.
|
2006-10-20 17:42:20 +00:00
|
|
|
X86CC = X86::COND_S;
|
2006-09-13 17:04:54 +00:00
|
|
|
return true;
|
2008-09-12 16:56:44 +00:00
|
|
|
} else if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
|
2007-09-17 14:49:27 +00:00
|
|
|
// X < 1 -> X <= 0
|
|
|
|
RHS = DAG.getConstant(0, RHS.getValueType());
|
|
|
|
X86CC = X86::COND_LE;
|
|
|
|
return true;
|
2006-09-13 17:04:54 +00:00
|
|
|
}
|
2006-09-13 03:22:10 +00:00
|
|
|
}
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2006-01-06 00:43:03 +00:00
|
|
|
switch (SetCCOpcode) {
|
|
|
|
default: break;
|
2006-10-20 17:42:20 +00:00
|
|
|
case ISD::SETEQ: X86CC = X86::COND_E; break;
|
|
|
|
case ISD::SETGT: X86CC = X86::COND_G; break;
|
|
|
|
case ISD::SETGE: X86CC = X86::COND_GE; break;
|
|
|
|
case ISD::SETLT: X86CC = X86::COND_L; break;
|
|
|
|
case ISD::SETLE: X86CC = X86::COND_LE; break;
|
|
|
|
case ISD::SETNE: X86CC = X86::COND_NE; break;
|
|
|
|
case ISD::SETULT: X86CC = X86::COND_B; break;
|
|
|
|
case ISD::SETUGT: X86CC = X86::COND_A; break;
|
|
|
|
case ISD::SETULE: X86CC = X86::COND_BE; break;
|
|
|
|
case ISD::SETUGE: X86CC = X86::COND_AE; break;
|
2006-01-06 00:43:03 +00:00
|
|
|
}
|
|
|
|
} else {
|
2008-10-24 13:03:10 +00:00
|
|
|
// First determine if it is required or is profitable to flip the operands.
|
|
|
|
|
|
|
|
// If LHS is a foldable load, but RHS is not, flip the condition.
|
|
|
|
if ((ISD::isNON_EXTLoad(LHS.getNode()) && LHS.hasOneUse()) &&
|
|
|
|
!(ISD::isNON_EXTLoad(RHS.getNode()) && RHS.hasOneUse())) {
|
|
|
|
SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
|
|
|
|
std::swap(LHS, RHS);
|
|
|
|
}
|
|
|
|
|
2008-08-29 23:22:12 +00:00
|
|
|
switch (SetCCOpcode) {
|
|
|
|
default: break;
|
|
|
|
case ISD::SETOLT:
|
|
|
|
case ISD::SETOLE:
|
|
|
|
case ISD::SETUGT:
|
|
|
|
case ISD::SETUGE:
|
2008-10-24 13:03:10 +00:00
|
|
|
std::swap(LHS, RHS);
|
2008-08-29 23:22:12 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2006-01-06 00:43:03 +00:00
|
|
|
// On a floating point condition, the flags are set as follows:
|
|
|
|
// ZF PF CF op
|
|
|
|
// 0 | 0 | 0 | X > Y
|
|
|
|
// 0 | 0 | 1 | X < Y
|
|
|
|
// 1 | 0 | 0 | X == Y
|
|
|
|
// 1 | 1 | 1 | unordered
|
|
|
|
switch (SetCCOpcode) {
|
|
|
|
default: break;
|
|
|
|
case ISD::SETUEQ:
|
2008-08-29 23:22:12 +00:00
|
|
|
case ISD::SETEQ:
|
|
|
|
X86CC = X86::COND_E;
|
|
|
|
break;
|
|
|
|
case ISD::SETOLT: // flipped
|
2006-01-06 00:43:03 +00:00
|
|
|
case ISD::SETOGT:
|
2008-08-29 23:22:12 +00:00
|
|
|
case ISD::SETGT:
|
|
|
|
X86CC = X86::COND_A;
|
|
|
|
break;
|
|
|
|
case ISD::SETOLE: // flipped
|
2006-01-06 00:43:03 +00:00
|
|
|
case ISD::SETOGE:
|
2008-08-29 23:22:12 +00:00
|
|
|
case ISD::SETGE:
|
|
|
|
X86CC = X86::COND_AE;
|
|
|
|
break;
|
|
|
|
case ISD::SETUGT: // flipped
|
2006-01-06 00:43:03 +00:00
|
|
|
case ISD::SETULT:
|
2008-08-29 23:22:12 +00:00
|
|
|
case ISD::SETLT:
|
|
|
|
X86CC = X86::COND_B;
|
|
|
|
break;
|
|
|
|
case ISD::SETUGE: // flipped
|
2006-01-06 00:43:03 +00:00
|
|
|
case ISD::SETULE:
|
2008-08-29 23:22:12 +00:00
|
|
|
case ISD::SETLE:
|
|
|
|
X86CC = X86::COND_BE;
|
|
|
|
break;
|
2006-01-06 00:43:03 +00:00
|
|
|
case ISD::SETONE:
|
2008-08-29 23:22:12 +00:00
|
|
|
case ISD::SETNE:
|
|
|
|
X86CC = X86::COND_NE;
|
|
|
|
break;
|
|
|
|
case ISD::SETUO:
|
|
|
|
X86CC = X86::COND_P;
|
|
|
|
break;
|
|
|
|
case ISD::SETO:
|
|
|
|
X86CC = X86::COND_NP;
|
|
|
|
break;
|
2006-01-06 00:43:03 +00:00
|
|
|
}
|
|
|
|
}
|
2006-01-30 23:41:35 +00:00
|
|
|
|
2008-08-29 22:13:21 +00:00
|
|
|
return X86CC != X86::COND_INVALID;
|
2006-01-06 00:43:03 +00:00
|
|
|
}
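// Illustrative sketch, not part of the lowering code: why the ordered FP
// greater-than cases above map to the unsigned-style COND_A/COND_AE codes.
// Using the EFLAGS settings from the table in translateX86CC (ZF, PF, CF
// after a comiss/ucomiss-style compare), "above" (!CF && !ZF) holds exactly
// when X > Y, while "below" (CF) also fires for unordered inputs. Names
// below are hypothetical.
#include <cassert>

struct FPCompareFlagsSketch { bool ZF, PF, CF; };

static const FPCompareFlagsSketch GreaterFlags   = {false, false, false}; // X > Y
static const FPCompareFlagsSketch LessFlags      = {false, false, true }; // X < Y
static const FPCompareFlagsSketch EqualFlags     = {true,  false, false}; // X == Y
static const FPCompareFlagsSketch UnorderedFlags = {true,  true,  true }; // NaN operand

static bool CondASketch(FPCompareFlagsSketch F) { return !F.CF && !F.ZF; } // COND_A
static bool CondBSketch(FPCompareFlagsSketch F) { return F.CF; }           // COND_B

static void FPCompareFlagsSketchExamples() {
  // COND_A is true only for the strictly-greater outcome.
  assert(CondASketch(GreaterFlags) && !CondASketch(LessFlags) &&
         !CondASketch(EqualFlags) && !CondASketch(UnorderedFlags));
  // COND_B is true for less-than and for unordered operands.
  assert(!CondBSketch(GreaterFlags) && CondBSketch(LessFlags) &&
         !CondBSketch(EqualFlags) && CondBSketch(UnorderedFlags));
}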
|
|
|
|
|
2006-01-11 00:33:36 +00:00
|
|
|
/// hasFPCMov - is there a floating point cmov for the specific X86 condition
|
|
|
|
/// code. Current x86 isa includes the following FP cmov instructions:
|
2006-01-10 20:26:56 +00:00
|
|
|
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
|
2006-01-11 00:33:36 +00:00
|
|
|
static bool hasFPCMov(unsigned X86CC) {
|
2006-01-10 20:26:56 +00:00
|
|
|
switch (X86CC) {
|
|
|
|
default:
|
|
|
|
return false;
|
2006-10-20 17:42:20 +00:00
|
|
|
case X86::COND_B:
|
|
|
|
case X86::COND_BE:
|
|
|
|
case X86::COND_E:
|
|
|
|
case X86::COND_P:
|
|
|
|
case X86::COND_A:
|
|
|
|
case X86::COND_AE:
|
|
|
|
case X86::COND_NE:
|
|
|
|
case X86::COND_NP:
|
2006-01-10 20:26:56 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-04-06 23:23:56 +00:00
|
|
|
/// isUndefOrInRange - Op is either an undef node or a ConstantSDNode. Return
|
2006-04-07 21:53:05 +00:00
|
|
|
/// true if Op is undef or if its value falls within the specified range [Low, Hi).
|
2008-07-27 21:46:04 +00:00
|
|
|
static bool isUndefOrInRange(SDValue Op, unsigned Low, unsigned Hi) {
|
2006-04-06 23:23:56 +00:00
|
|
|
if (Op.getOpcode() == ISD::UNDEF)
|
|
|
|
return true;
|
|
|
|
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Op)->getZExtValue();
|
2006-04-07 21:53:05 +00:00
|
|
|
return (Val >= Low && Val < Hi);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isUndefOrEqual - Op is either an undef node or a ConstantSDNode. Return
|
|
|
|
/// true if Op is undef or if its value is equal to the specified value.
|
2008-07-27 21:46:04 +00:00
|
|
|
static bool isUndefOrEqual(SDValue Op, unsigned Val) {
|
2006-04-07 21:53:05 +00:00
|
|
|
if (Op.getOpcode() == ISD::UNDEF)
|
|
|
|
return true;
|
2008-09-12 16:56:44 +00:00
|
|
|
return cast<ConstantSDNode>(Op)->getZExtValue() == Val;
|
2006-04-06 23:23:56 +00:00
|
|
|
}
|
|
|
|
|
2006-03-22 18:59:22 +00:00
|
|
|
/// isPSHUFDMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to PSHUFD.
|
|
|
|
bool X86::isPSHUFDMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
2007-08-02 21:17:01 +00:00
|
|
|
if (N->getNumOperands() != 2 && N->getNumOperands() != 4)
|
2006-03-22 18:59:22 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Check if the value doesn't reference the second vector.
|
2006-03-29 23:07:14 +00:00
|
|
|
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-03-31 00:30:29 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
if (cast<ConstantSDNode>(Arg)->getZExtValue() >= e)
|
2006-03-29 23:07:14 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isPSHUFHWMask - Return true if the specified VECTOR_SHUFFLE operand
|
2006-04-05 01:47:37 +00:00
|
|
|
/// specifies a shuffle of elements that is suitable for input to PSHUFHW.
|
2006-03-29 23:07:14 +00:00
|
|
|
bool X86::isPSHUFHWMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
if (N->getNumOperands() != 8)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Lower quadword copied in order.
|
|
|
|
for (unsigned i = 0; i != 4; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-03-31 00:30:29 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
if (cast<ConstantSDNode>(Arg)->getZExtValue() != i)
|
2006-03-29 23:07:14 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Upper quadword shuffled.
|
|
|
|
for (unsigned i = 4; i != 8; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-03-31 00:30:29 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-03-29 23:07:14 +00:00
|
|
|
if (Val < 4 || Val > 7)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isPSHUFLWMask - Return true if the specified VECTOR_SHUFFLE operand
|
2006-04-05 01:47:37 +00:00
|
|
|
/// specifies a shuffle of elements that is suitable for input to PSHUFLW.
|
2006-03-29 23:07:14 +00:00
|
|
|
bool X86::isPSHUFLWMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
if (N->getNumOperands() != 8)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Upper quadword copied in order.
|
2006-04-07 21:53:05 +00:00
|
|
|
for (unsigned i = 4; i != 8; ++i)
|
|
|
|
if (!isUndefOrEqual(N->getOperand(i), i))
|
2006-03-29 23:07:14 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Lower quadword shuffled.
|
2006-04-07 21:53:05 +00:00
|
|
|
for (unsigned i = 0; i != 4; ++i)
|
|
|
|
if (!isUndefOrInRange(N->getOperand(i), 0, 4))
|
2006-03-29 23:07:14 +00:00
|
|
|
return false;
|
2006-03-24 01:18:28 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to SHUFP*.
|
2008-04-16 16:15:27 +00:00
|
|
|
static bool isSHUFPMask(SDOperandPtr Elems, unsigned NumElems) {
|
2006-04-20 08:58:49 +00:00
|
|
|
if (NumElems != 2 && NumElems != 4) return false;
|
2006-03-24 01:18:28 +00:00
|
|
|
|
2006-04-20 08:58:49 +00:00
|
|
|
unsigned Half = NumElems / 2;
|
|
|
|
for (unsigned i = 0; i < Half; ++i)
|
2007-02-25 07:10:00 +00:00
|
|
|
if (!isUndefOrInRange(Elems[i], 0, NumElems))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
|
|
|
for (unsigned i = Half; i < NumElems; ++i)
|
2007-02-25 07:10:00 +00:00
|
|
|
if (!isUndefOrInRange(Elems[i], NumElems, NumElems*2))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
2006-03-24 02:58:06 +00:00
|
|
|
|
2006-04-20 08:58:49 +00:00
|
|
|
return true;
|
|
|
|
}
|
2006-03-24 01:18:28 +00:00
|
|
|
|
2006-04-20 08:58:49 +00:00
|
|
|
bool X86::isSHUFPMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
2007-02-25 07:10:00 +00:00
|
|
|
return ::isSHUFPMask(N->op_begin(), N->getNumOperands());
|
2006-04-20 08:58:49 +00:00
|
|
|
}
|
2006-03-22 18:59:22 +00:00
|
|
|
|
2007-05-17 18:45:50 +00:00
|
|
|
/// isCommutedSHUFP - Returns true if the shuffle mask is exactly
|
2006-04-20 08:58:49 +00:00
|
|
|
/// the reverse of what x86 shuffles want. x86 shuffles require the lower
|
|
|
|
/// half elements to come from vector 1 (which would equal the dest.) and
|
|
|
|
/// the upper half to come from vector 2.
|
2008-04-16 16:15:27 +00:00
|
|
|
static bool isCommutedSHUFP(SDOperandPtr Ops, unsigned NumOps) {
|
2007-02-25 07:10:00 +00:00
|
|
|
if (NumOps != 2 && NumOps != 4) return false;
|
2006-04-20 08:58:49 +00:00
|
|
|
|
2007-02-25 07:10:00 +00:00
|
|
|
unsigned Half = NumOps / 2;
|
2006-04-20 08:58:49 +00:00
|
|
|
for (unsigned i = 0; i < Half; ++i)
|
2007-02-25 07:10:00 +00:00
|
|
|
if (!isUndefOrInRange(Ops[i], NumOps, NumOps*2))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
2007-02-25 07:10:00 +00:00
|
|
|
for (unsigned i = Half; i < NumOps; ++i)
|
|
|
|
if (!isUndefOrInRange(Ops[i], 0, NumOps))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
2006-03-22 18:59:22 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-04-20 08:58:49 +00:00
|
|
|
static bool isCommutedSHUFP(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
2007-02-25 07:10:00 +00:00
|
|
|
return isCommutedSHUFP(N->op_begin(), N->getNumOperands());
|
2006-04-20 08:58:49 +00:00
|
|
|
}
|
|
|
|
|
2006-03-24 02:58:06 +00:00
|
|
|
/// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to MOVHLPS.
|
|
|
|
bool X86::isMOVHLPSMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
2006-03-28 06:50:32 +00:00
|
|
|
if (N->getNumOperands() != 4)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
|
2006-04-07 21:53:05 +00:00
|
|
|
return isUndefOrEqual(N->getOperand(0), 6) &&
|
|
|
|
isUndefOrEqual(N->getOperand(1), 7) &&
|
|
|
|
isUndefOrEqual(N->getOperand(2), 2) &&
|
|
|
|
isUndefOrEqual(N->getOperand(3), 3);
|
2006-03-28 06:50:32 +00:00
|
|
|
}
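// Illustrative sketch, not part of the lowering code: what the <6, 7, 2, 3>
// mask accepted above means. With the two 4-lane inputs concatenated, indices
// 4..7 name V2, so MOVHLPS yields <V2[2], V2[3], V1[2], V1[3]>: the high half
// of V2 moves into the low half of the result and the high half of V1 is
// kept. The names and sample lane values below are hypothetical.
#include <cassert>

static void MovhlpsMaskSketchExample() {
  const float V1[4] = {10, 11, 12, 13};
  const float V2[4] = {20, 21, 22, 23};
  const unsigned Mask[4] = {6, 7, 2, 3};
  float Result[4];
  for (unsigned i = 0; i != 4; ++i)
    Result[i] = Mask[i] < 4 ? V1[Mask[i]] : V2[Mask[i] - 4];
  assert(Result[0] == 22 && Result[1] == 23 &&
         Result[2] == 12 && Result[3] == 13);
}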
|
|
|
|
|
2006-11-07 22:14:24 +00:00
|
|
|
/// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
|
|
|
|
/// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
|
|
|
|
/// <2, 3, 2, 3>
|
|
|
|
bool X86::isMOVHLPS_v_undef_Mask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
if (N->getNumOperands() != 4)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Expect bit0 == 2, bit1 == 3, bit2 == 2, bit3 == 3
|
|
|
|
return isUndefOrEqual(N->getOperand(0), 2) &&
|
|
|
|
isUndefOrEqual(N->getOperand(1), 3) &&
|
|
|
|
isUndefOrEqual(N->getOperand(2), 2) &&
|
|
|
|
isUndefOrEqual(N->getOperand(3), 3);
|
|
|
|
}
|
|
|
|
|
2006-04-06 23:23:56 +00:00
|
|
|
/// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
|
|
|
|
bool X86::isMOVLPMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
unsigned NumElems = N->getNumOperands();
|
|
|
|
if (NumElems != 2 && NumElems != 4)
|
|
|
|
return false;
|
|
|
|
|
2006-04-07 21:53:05 +00:00
|
|
|
for (unsigned i = 0; i < NumElems/2; ++i)
|
|
|
|
if (!isUndefOrEqual(N->getOperand(i), i + NumElems))
|
|
|
|
return false;
|
2006-04-06 23:23:56 +00:00
|
|
|
|
2006-04-07 21:53:05 +00:00
|
|
|
for (unsigned i = NumElems/2; i < NumElems; ++i)
|
|
|
|
if (!isUndefOrEqual(N->getOperand(i), i))
|
|
|
|
return false;
|
2006-04-06 23:23:56 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// isMOVHPMask - Return true if the specified VECTOR_SHUFFLE operand
|
2006-04-19 20:35:22 +00:00
|
|
|
/// specifies a shuffle of elements that is suitable for input to MOVHP{S|D}
|
|
|
|
/// and MOVLHPS.
|
2006-04-06 23:23:56 +00:00
|
|
|
bool X86::isMOVHPMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
unsigned NumElems = N->getNumOperands();
|
|
|
|
if (NumElems != 2 && NumElems != 4)
|
|
|
|
return false;
|
|
|
|
|
2006-04-07 21:53:05 +00:00
|
|
|
for (unsigned i = 0; i < NumElems/2; ++i)
|
|
|
|
if (!isUndefOrEqual(N->getOperand(i), i))
|
|
|
|
return false;
|
2006-04-06 23:23:56 +00:00
|
|
|
|
|
|
|
for (unsigned i = 0; i < NumElems/2; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i + NumElems/2);
|
2006-04-07 21:53:05 +00:00
|
|
|
if (!isUndefOrEqual(Arg, i + NumElems))
|
|
|
|
return false;
|
2006-04-06 23:23:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-03-28 02:43:26 +00:00
|
|
|
/// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to UNPCKL.
|
2008-04-16 16:15:27 +00:00
|
|
|
bool static isUNPCKLMask(SDOperandPtr Elts, unsigned NumElts,
|
2007-02-25 07:10:00 +00:00
|
|
|
bool V2IsSplat = false) {
|
|
|
|
if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
|
2006-03-24 02:58:06 +00:00
|
|
|
return false;
|
|
|
|
|
2007-02-25 07:10:00 +00:00
|
|
|
for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue BitI = Elts[i];
|
|
|
|
SDValue BitI1 = Elts[i+1];
|
2006-04-07 21:53:05 +00:00
|
|
|
if (!isUndefOrEqual(BitI, j))
|
|
|
|
return false;
|
2006-04-20 08:58:49 +00:00
|
|
|
if (V2IsSplat) {
|
2007-02-25 07:10:00 +00:00
|
|
|
if (isUndefOrEqual(BitI1, NumElts))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
|
|
|
} else {
|
2007-02-25 07:10:00 +00:00
|
|
|
if (!isUndefOrEqual(BitI1, j + NumElts))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
2006-03-28 02:43:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
2006-03-24 02:58:06 +00:00
|
|
|
}
|
|
|
|
|
2006-04-20 08:58:49 +00:00
|
|
|
bool X86::isUNPCKLMask(SDNode *N, bool V2IsSplat) {
|
2006-03-28 00:39:58 +00:00
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
2007-02-25 07:10:00 +00:00
|
|
|
return ::isUNPCKLMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
|
2006-04-20 08:58:49 +00:00
|
|
|
}
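// Illustrative sketch, not part of the lowering code: the UNPCKL mask shape
// checked above, restated on plain indices. For a 4-element shuffle the low
// halves of the two inputs interleave as <0, 4, 1, 5> (the unpcklps pattern);
// ~0u stands in for an undef mask element. Names are hypothetical.
#include <cassert>

static bool UnpckLowMaskSketch(const unsigned *Elts, unsigned NumElts) {
  for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
    if (Elts[i] != ~0u && Elts[i] != j)
      return false;                          // even slots take element j of V1
    if (Elts[i + 1] != ~0u && Elts[i + 1] != j + NumElts)
      return false;                          // odd slots take element j of V2
  }
  return true;
}

static void UnpckLowMaskSketchExamples() {
  const unsigned Interleave[4] = {0, 4, 1, 5};   // matches unpcklps
  const unsigned WithUndef[4]  = {0, ~0u, 1, 5}; // undef slots still match
  const unsigned HighHalves[4] = {2, 6, 3, 7};   // this is the UNPCKH shape
  assert(UnpckLowMaskSketch(Interleave, 4));
  assert(UnpckLowMaskSketch(WithUndef, 4));
  assert(!UnpckLowMaskSketch(HighHalves, 4));
}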
|
2006-03-28 00:39:58 +00:00
|
|
|
|
2006-04-20 08:58:49 +00:00
|
|
|
/// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to UNPCKH.
|
2008-04-16 16:15:27 +00:00
|
|
|
bool static isUNPCKHMask(SDOperandPtr Elts, unsigned NumElts,
|
2007-02-25 07:10:00 +00:00
|
|
|
bool V2IsSplat = false) {
|
|
|
|
if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
|
2006-03-28 00:39:58 +00:00
|
|
|
return false;
|
|
|
|
|
2007-02-25 07:10:00 +00:00
|
|
|
for (unsigned i = 0, j = 0; i != NumElts; i += 2, ++j) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue BitI = Elts[i];
|
|
|
|
SDValue BitI1 = Elts[i+1];
|
2007-02-25 07:10:00 +00:00
|
|
|
if (!isUndefOrEqual(BitI, j + NumElts/2))
|
2006-04-07 21:53:05 +00:00
|
|
|
return false;
|
2006-04-20 08:58:49 +00:00
|
|
|
if (V2IsSplat) {
|
2007-02-25 07:10:00 +00:00
|
|
|
if (isUndefOrEqual(BitI1, NumElts))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
|
|
|
} else {
|
2007-02-25 07:10:00 +00:00
|
|
|
if (!isUndefOrEqual(BitI1, j + NumElts/2 + NumElts))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
2006-03-28 00:39:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-04-20 08:58:49 +00:00
|
|
|
bool X86::isUNPCKHMask(SDNode *N, bool V2IsSplat) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
2007-02-25 07:10:00 +00:00
|
|
|
return ::isUNPCKHMask(N->op_begin(), N->getNumOperands(), V2IsSplat);
|
2006-04-20 08:58:49 +00:00
|
|
|
}
|
|
|
|
|
2006-04-05 07:20:06 +00:00
|
|
|
/// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
|
|
|
|
/// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
|
|
|
|
/// <0, 0, 1, 1>
|
|
|
|
bool X86::isUNPCKL_v_undef_Mask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
unsigned NumElems = N->getNumOperands();
|
2007-04-24 21:16:55 +00:00
|
|
|
if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
|
2006-04-05 07:20:06 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
for (unsigned i = 0, j = 0; i != NumElems; i += 2, ++j) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue BitI = N->getOperand(i);
|
|
|
|
SDValue BitI1 = N->getOperand(i+1);
|
2006-04-05 07:20:06 +00:00
|
|
|
|
2006-04-07 21:53:05 +00:00
|
|
|
if (!isUndefOrEqual(BitI, j))
|
|
|
|
return false;
|
|
|
|
if (!isUndefOrEqual(BitI1, j))
|
|
|
|
return false;
|
2006-04-05 07:20:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2007-04-24 21:16:55 +00:00
|
|
|
/// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
|
|
|
|
/// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
|
|
|
|
/// <2, 2, 3, 3>
|
|
|
|
bool X86::isUNPCKH_v_undef_Mask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
unsigned NumElems = N->getNumOperands();
|
|
|
|
if (NumElems != 2 && NumElems != 4 && NumElems != 8 && NumElems != 16)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
for (unsigned i = 0, j = NumElems / 2; i != NumElems; i += 2, ++j) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue BitI = N->getOperand(i);
|
|
|
|
SDValue BitI1 = N->getOperand(i + 1);
|
2007-04-24 21:16:55 +00:00
|
|
|
|
|
|
|
if (!isUndefOrEqual(BitI, j))
|
|
|
|
return false;
|
|
|
|
if (!isUndefOrEqual(BitI1, j))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-04-21 01:05:10 +00:00
|
|
|
/// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to MOVSS,
|
|
|
|
/// MOVSD, and MOVD, i.e. setting the lowest element.
|
2008-04-16 16:15:27 +00:00
|
|
|
static bool isMOVLMask(SDOperandPtr Elts, unsigned NumElts) {
|
2007-12-06 22:14:22 +00:00
|
|
|
if (NumElts != 2 && NumElts != 4)
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
|
|
|
|
2007-02-25 07:10:00 +00:00
|
|
|
if (!isUndefOrEqual(Elts[0], NumElts))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
|
|
|
|
2007-02-25 07:10:00 +00:00
|
|
|
for (unsigned i = 1; i < NumElts; ++i) {
|
|
|
|
if (!isUndefOrEqual(Elts[i], i))
|
2006-04-20 08:58:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-04-21 01:05:10 +00:00
|
|
|
bool X86::isMOVLMask(SDNode *N) {
|
2006-04-11 00:19:04 +00:00
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
2007-02-25 07:10:00 +00:00
|
|
|
return ::isMOVLMask(N->op_begin(), N->getNumOperands());
|
2006-04-20 08:58:49 +00:00
|
|
|
}
|
2006-04-11 00:19:04 +00:00
|
|
|
|
2006-04-21 01:05:10 +00:00
|
|
|
/// isCommutedMOVL - Returns true if the shuffle mask is exactly the reverse
|
|
|
|
/// of what x86 movss wants. X86 movs requires the lowest element to be the lowest
|
2006-04-20 08:58:49 +00:00
|
|
|
/// element of vector 2 and the other elements to come from vector 1 in order.
|
2008-04-16 16:15:27 +00:00
|
|
|
static bool isCommutedMOVL(SDOperandPtr Ops, unsigned NumOps,
|
2007-02-25 07:10:00 +00:00
|
|
|
bool V2IsSplat = false,
|
2006-09-08 01:50:06 +00:00
|
|
|
bool V2IsUndef = false) {
|
2007-02-25 07:10:00 +00:00
|
|
|
if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
|
2006-04-11 00:19:04 +00:00
|
|
|
return false;
|
|
|
|
|
2006-04-20 08:58:49 +00:00
|
|
|
if (!isUndefOrEqual(Ops[0], 0))
|
2006-04-11 00:19:04 +00:00
|
|
|
return false;
|
|
|
|
|
2007-02-25 07:10:00 +00:00
|
|
|
for (unsigned i = 1; i < NumOps; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = Ops[i];
|
2007-02-25 07:10:00 +00:00
|
|
|
if (!(isUndefOrEqual(Arg, i+NumOps) ||
|
|
|
|
(V2IsUndef && isUndefOrInRange(Arg, NumOps, NumOps*2)) ||
|
|
|
|
(V2IsSplat && isUndefOrEqual(Arg, NumOps))))
|
2006-09-08 01:50:06 +00:00
|
|
|
return false;
|
2006-04-11 00:19:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2006-04-05 07:20:06 +00:00
|
|
|
|
2006-09-08 01:50:06 +00:00
|
|
|
static bool isCommutedMOVL(SDNode *N, bool V2IsSplat = false,
|
|
|
|
bool V2IsUndef = false) {
|
2006-04-20 08:58:49 +00:00
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
2007-02-25 07:10:00 +00:00
|
|
|
return isCommutedMOVL(N->op_begin(), N->getNumOperands(),
|
|
|
|
V2IsSplat, V2IsUndef);
|
2006-04-20 08:58:49 +00:00
|
|
|
}
|
|
|
|
|
2006-04-14 21:59:03 +00:00
|
|
|
/// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
|
|
|
|
bool X86::isMOVSHDUPMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
if (N->getNumOperands() != 4)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Expect 1, 1, 3, 3
|
|
|
|
for (unsigned i = 0; i < 2; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-04-14 21:59:03 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-04-14 21:59:03 +00:00
|
|
|
if (Val != 1) return false;
|
|
|
|
}
|
2006-04-15 05:37:34 +00:00
|
|
|
|
|
|
|
bool HasHi = false;
|
2006-04-14 21:59:03 +00:00
|
|
|
for (unsigned i = 2; i < 4; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-04-14 21:59:03 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-04-14 21:59:03 +00:00
|
|
|
if (Val != 3) return false;
|
2006-04-15 05:37:34 +00:00
|
|
|
HasHi = true;
|
2006-04-14 21:59:03 +00:00
|
|
|
}
|
2006-04-15 03:13:24 +00:00
|
|
|
|
2006-04-15 05:37:34 +00:00
|
|
|
// Don't use movshdup if it can be done with a shufps.
|
|
|
|
return HasHi;
|
2006-04-14 21:59:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
|
|
|
|
bool X86::isMOVSLDUPMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
if (N->getNumOperands() != 4)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Expect 0, 0, 2, 2
|
|
|
|
for (unsigned i = 0; i < 2; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-04-14 21:59:03 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-04-14 21:59:03 +00:00
|
|
|
if (Val != 0) return false;
|
|
|
|
}
|
2006-04-15 05:37:34 +00:00
|
|
|
|
|
|
|
bool HasHi = false;
|
2006-04-14 21:59:03 +00:00
|
|
|
for (unsigned i = 2; i < 4; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-04-14 21:59:03 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-04-14 21:59:03 +00:00
|
|
|
if (Val != 2) return false;
|
2006-04-15 05:37:34 +00:00
|
|
|
HasHi = true;
|
2006-04-14 21:59:03 +00:00
|
|
|
}
|
2006-04-15 03:13:24 +00:00
|
|
|
|
2006-04-15 05:37:34 +00:00
|
|
|
// Don't use movsldup if it can be done with a shufps.
|
|
|
|
return HasHi;
|
2006-04-14 21:59:03 +00:00
|
|
|
}
|
|
|
|
|
2007-06-19 00:02:56 +00:00
|
|
|
/// isIdentityMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies an identity operation on the LHS or RHS.
|
|
|
|
static bool isIdentityMask(SDNode *N, bool RHS = false) {
|
|
|
|
unsigned NumElems = N->getNumOperands();
|
|
|
|
for (unsigned i = 0; i < NumElems; ++i)
|
|
|
|
if (!isUndefOrEqual(N->getOperand(i), i + (RHS ? NumElems : 0)))
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-03-22 02:53:00 +00:00
|
|
|
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
|
|
|
|
/// a splat of a single element.
|
2006-04-17 20:43:08 +00:00
|
|
|
static bool isSplatMask(SDNode *N) {
|
2006-03-22 02:53:00 +00:00
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
// This is a splat operation if each element of the permute is the same, and
|
|
|
|
// if the value doesn't reference the second vector.
|
2006-04-19 23:28:59 +00:00
|
|
|
unsigned NumElems = N->getNumOperands();
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ElementBase;
|
2006-04-19 23:28:59 +00:00
|
|
|
unsigned i = 0;
|
|
|
|
for (; i != NumElems; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = N->getOperand(i);
|
2006-11-02 20:25:50 +00:00
|
|
|
if (isa<ConstantSDNode>(Elt)) {
|
2006-04-19 23:28:59 +00:00
|
|
|
ElementBase = Elt;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (!ElementBase.getNode())
|
2006-04-19 23:28:59 +00:00
|
|
|
return false;
|
|
|
|
|
|
|
|
for (; i != NumElems; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-03-31 00:30:29 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2006-04-19 23:28:59 +00:00
|
|
|
if (Arg != ElementBase) return false;
|
2006-03-22 02:53:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure it is a splat of the first vector operand.
|
2008-09-12 16:56:44 +00:00
|
|
|
return cast<ConstantSDNode>(ElementBase)->getZExtValue() < NumElems;
|
2006-03-22 02:53:00 +00:00
|
|
|
}
|
|
|
|
|
2006-04-17 20:43:08 +00:00
|
|
|
/// isSplatMask - Return true if the specified VECTOR_SHUFFLE operand specifies
|
|
|
|
/// a splat of a single element and it's a 2 or 4 element mask.
|
|
|
|
bool X86::isSplatMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
2006-04-19 23:28:59 +00:00
|
|
|
// We can only splat 64-bit and 32-bit quantities with a single instruction.
|
2006-04-17 20:43:08 +00:00
|
|
|
if (N->getNumOperands() != 4 && N->getNumOperands() != 2)
|
|
|
|
return false;
|
|
|
|
return ::isSplatMask(N);
|
|
|
|
}
|
|
|
|
|
2006-10-27 21:08:32 +00:00
|
|
|
/// isSplatLoMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a splat of element zero.
|
|
|
|
bool X86::isSplatLoMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
2006-11-21 00:01:06 +00:00
|
|
|
for (unsigned i = 0, e = N->getNumOperands(); i < e; ++i)
|
2006-10-27 21:08:32 +00:00
|
|
|
if (!isUndefOrEqual(N->getOperand(i), 0))
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2008-09-25 20:50:48 +00:00
|
|
|
/// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies a shuffle of elements that is suitable for input to MOVDDUP.
|
|
|
|
bool X86::isMOVDDUPMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
unsigned e = N->getNumOperands() / 2;
|
|
|
|
for (unsigned i = 0; i < e; ++i)
|
|
|
|
if (!isUndefOrEqual(N->getOperand(i), i))
|
|
|
|
return false;
|
|
|
|
for (unsigned i = 0; i < e; ++i)
|
|
|
|
if (!isUndefOrEqual(N->getOperand(e+i), i))
|
|
|
|
return false;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2006-03-22 08:01:21 +00:00
|
|
|
/// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
|
|
|
|
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUF* and SHUFP*
|
|
|
|
/// instructions.
|
|
|
|
unsigned X86::getShuffleSHUFImmediate(SDNode *N) {
|
2006-03-22 02:53:00 +00:00
|
|
|
unsigned NumOperands = N->getNumOperands();
|
|
|
|
unsigned Shift = (NumOperands == 4) ? 2 : 1;
|
|
|
|
unsigned Mask = 0;
|
2006-03-28 23:41:33 +00:00
|
|
|
for (unsigned i = 0; i < NumOperands; ++i) {
|
2006-03-31 00:30:29 +00:00
|
|
|
unsigned Val = 0;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(NumOperands-i-1);
|
2006-03-31 00:30:29 +00:00
|
|
|
if (Arg.getOpcode() != ISD::UNDEF)
|
2008-09-12 16:56:44 +00:00
|
|
|
Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-03-24 01:18:28 +00:00
|
|
|
if (Val >= NumOperands) Val -= NumOperands;
|
2006-03-22 08:01:21 +00:00
|
|
|
Mask |= Val;
|
2006-03-28 23:41:33 +00:00
|
|
|
if (i != NumOperands - 1)
|
|
|
|
Mask <<= Shift;
|
|
|
|
}
|
2006-03-22 08:01:21 +00:00
|
|
|
|
|
|
|
return Mask;
|
|
|
|
}
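// Illustrative sketch, not part of the lowering code: the immediate packing
// performed above, restated on a plain index array. Each element of a 4-wide
// mask takes two bits of the immediate, with mask element 0 in the lowest
// bits, so the identity mask <0,1,2,3> encodes to 0xE4 and the full reversal
// <3,2,1,0> encodes to 0x1B. Names are hypothetical; indices referring to the
// second vector are reduced modulo the element count, as in the code above.
#include <cassert>

static unsigned PackShufImmediateSketch(const unsigned *Mask, unsigned NumElts) {
  unsigned Shift = (NumElts == 4) ? 2 : 1;
  unsigned Imm = 0;
  for (unsigned i = 0; i != NumElts; ++i) {
    unsigned Val = Mask[NumElts - i - 1] % NumElts; // walk highest element first
    Imm |= Val;
    if (i != NumElts - 1)
      Imm <<= Shift;
  }
  return Imm;
}

static void PackShufImmediateSketchExamples() {
  const unsigned Identity[4] = {0, 1, 2, 3};
  const unsigned Reversed[4] = {3, 2, 1, 0};
  assert(PackShufImmediateSketch(Identity, 4) == 0xE4);
  assert(PackShufImmediateSketch(Reversed, 4) == 0x1B);
}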
|
|
|
|
|
2006-03-29 23:07:14 +00:00
|
|
|
/// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
|
|
|
|
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFHW
|
|
|
|
/// instructions.
|
|
|
|
unsigned X86::getShufflePSHUFHWImmediate(SDNode *N) {
|
|
|
|
unsigned Mask = 0;
|
|
|
|
// 8 nodes, but we only care about the last 4.
|
|
|
|
for (unsigned i = 7; i >= 4; --i) {
|
2006-03-31 00:30:29 +00:00
|
|
|
unsigned Val = 0;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-03-31 00:30:29 +00:00
|
|
|
if (Arg.getOpcode() != ISD::UNDEF)
|
2008-09-12 16:56:44 +00:00
|
|
|
Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-03-29 23:07:14 +00:00
|
|
|
Mask |= (Val - 4);
|
|
|
|
if (i != 4)
|
|
|
|
Mask <<= 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
|
|
|
|
/// the specified isShuffleMask VECTOR_SHUFFLE mask with PSHUFLW
|
|
|
|
/// instructions.
|
|
|
|
unsigned X86::getShufflePSHUFLWImmediate(SDNode *N) {
|
|
|
|
unsigned Mask = 0;
|
|
|
|
// 8 nodes, but we only care about the first 4.
|
|
|
|
for (int i = 3; i >= 0; --i) {
|
2006-03-31 00:30:29 +00:00
|
|
|
unsigned Val = 0;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-03-31 00:30:29 +00:00
|
|
|
if (Arg.getOpcode() != ISD::UNDEF)
|
2008-09-12 16:56:44 +00:00
|
|
|
Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-03-29 23:07:14 +00:00
|
|
|
Mask |= Val;
|
|
|
|
if (i != 0)
|
|
|
|
Mask <<= 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Mask;
|
|
|
|
}
|
|
|
|
|
2006-04-05 01:47:37 +00:00
|
|
|
/// isPSHUFHW_PSHUFLWMask - true if the specified VECTOR_SHUFFLE operand
|
|
|
|
/// specifies an 8 element shuffle that can be broken into a pair of
|
|
|
|
/// PSHUFHW and PSHUFLW.
|
|
|
|
static bool isPSHUFHW_PSHUFLWMask(SDNode *N) {
|
|
|
|
assert(N->getOpcode() == ISD::BUILD_VECTOR);
|
|
|
|
|
|
|
|
if (N->getNumOperands() != 8)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Lower quadword shuffled.
|
|
|
|
for (unsigned i = 0; i != 4; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-04-05 01:47:37 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Val >= 4)
|
2006-04-05 01:47:37 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Upper quadword shuffled.
|
|
|
|
for (unsigned i = 4; i != 8; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Arg = N->getOperand(i);
|
2006-04-05 01:47:37 +00:00
|
|
|
if (Arg.getOpcode() == ISD::UNDEF) continue;
|
|
|
|
assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
|
2006-04-05 01:47:37 +00:00
|
|
|
if (Val < 4 || Val > 7)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2007-11-25 00:24:49 +00:00
|
|
|
/// CommuteVectorShuffle - Swap vector_shuffle operands as well as
/// values in their permute mask.
static SDValue CommuteVectorShuffle(SDValue Op, SDValue &V1,
                                    SDValue &V2, SDValue &Mask,
                                    SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  MVT MaskVT = Mask.getValueType();
  MVT EltVT = MaskVT.getVectorElementType();
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDValue, 8> MaskVec;

  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }

  std::swap(V1, V2);
  Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

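// Illustrative example (not from the original source): with NumElems == 4, a
// mask of <0,5,2,7> becomes <4,1,6,3> after the commute, since indices that
// referred to V1 now refer to V2 (and vice versa) once the operands swap.
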
/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
static
SDValue CommuteVectorShuffleMask(SDValue Mask, SelectionDAG &DAG) {
  MVT MaskVT = Mask.getValueType();
  MVT EltVT = MaskVT.getVectorElementType();
  unsigned NumElems = Mask.getNumOperands();
  SmallVector<SDValue, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF) {
      MaskVec.push_back(DAG.getNode(ISD::UNDEF, EltVT));
      continue;
    }
    assert(isa<ConstantSDNode>(Arg) && "Invalid VECTOR_SHUFFLE mask!");
    unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
    if (Val < NumElems)
      MaskVec.push_back(DAG.getConstant(Val + NumElems, EltVT));
    else
      MaskVec.push_back(DAG.getConstant(Val - NumElems, EltVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], NumElems);
}

/// ShouldXformToMOVHLPS - Return true if the node should be transformed to
/// match movhlps. The lower half elements should come from upper half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order).
static bool ShouldXformToMOVHLPS(SDNode *Mask) {
  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 4)
    return false;
  for (unsigned i = 0, e = 2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+2))
      return false;
  for (unsigned i = 2; i != 4; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+4))
      return false;
  return true;
}

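// Illustrative example (not from the original source): for a 4-element
// shuffle, only masks equivalent to <2,3,6,7> (undef entries allowed) pass
// this check: result elements 0-1 come from the high half of V1 and elements
// 2-3 come from the high half of V2, which is the MOVHLPS pattern.
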
/// isScalarLoadToVector - Returns true if the node is a scalar load that
/// is promoted to a vector. It also returns the LoadSDNode by reference if
/// required.
static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = NULL) {
  if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
    return false;
  N = N->getOperand(0).getNode();
  if (!ISD::isNON_EXTLoad(N))
    return false;
  if (LD)
    *LD = cast<LoadSDNode>(N);
  return true;
}

/// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
/// match movlp{s|d}. The lower half elements should come from lower half of
/// V1 (and in order), and the upper half elements should come from the upper
/// half of V2 (and in order). And since V1 will become the source of the
/// MOVLP, it must be either a vector load or a scalar load to vector.
static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, SDNode *Mask) {
  if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
    return false;
  // If V2 is a vector load, don't do this transformation. We will try to use
  // load folding shufps op.
  if (ISD::isNON_EXTLoad(V2))
    return false;

  unsigned NumElems = Mask->getNumOperands();
  if (NumElems != 2 && NumElems != 4)
    return false;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i))
      return false;
  for (unsigned i = NumElems/2; i != NumElems; ++i)
    if (!isUndefOrEqual(Mask->getOperand(i), i+NumElems))
      return false;
  return true;
}

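// Illustrative example (not from the original source): the accepted masks are
// <0,1,6,7> for 4-element shuffles and <0,3> for 2-element shuffles (undef
// entries allowed). The low half of the result is V1's low half in order and
// the high half is V2's high half, matching MOVLPS/MOVLPD where V1 supplies
// the loaded low half.
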
/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are
/// all the same.
static bool isSplatVector(SDNode *N) {
  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;

  SDValue SplatValue = N->getOperand(0);
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    if (N->getOperand(i) != SplatValue)
      return false;
  return true;
}

/// isUndefShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to an undef.
static bool isUndefShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  SDValue Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
      if (Val < NumElems && V1.getOpcode() != ISD::UNDEF)
        return false;
      else if (Val >= NumElems && V2.getOpcode() != ISD::UNDEF)
        return false;
    }
  }
  return true;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDValue Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getZExtValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}

/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
/// to a zero vector.
static bool isZeroShuffle(SDNode *N) {
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);
  SDValue Mask = N->getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue Arg = Mask.getOperand(i);
    if (Arg.getOpcode() == ISD::UNDEF)
      continue;

    unsigned Idx = cast<ConstantSDNode>(Arg)->getZExtValue();
    if (Idx < NumElems) {
      unsigned Opc = V1.getNode()->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V1.getNode()->getOperand(Idx)))
        return false;
    } else if (Idx >= NumElems) {
      unsigned Opc = V2.getNode()->getOpcode();
      if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
        continue;
      if (Opc != ISD::BUILD_VECTOR ||
          !isZeroNode(V2.getNode()->getOperand(Idx - NumElems)))
        return false;
    }
  }
  return true;
}

/// getZeroVector - Returns a vector of specified type with all zero elements.
///
static SDValue getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
  // type. This ensures they get CSE'd.
  SDValue Vec;
  if (VT.getSizeInBits() == 64) { // MMX
    SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  } else if (HasSSE2) { // SSE2
    SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
  } else { // SSE1
    SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4f32, Cst, Cst, Cst, Cst);
  }
  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}

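// Usage note (illustrative, not from the original source): a call such as
// getZeroVector(MVT::v2f64, /*HasSSE2=*/true, DAG) builds the zero as a v4i32
// BUILD_VECTOR and bitcasts it to v2f64, so every zero vector in the function
// CSEs to the same node and is emitted as a single pxor.
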
/// getOnesVector - Returns a vector of specified type with all bits set.
///
static SDValue getOnesVector(MVT VT, SelectionDAG &DAG) {
  assert(VT.isVector() && "Expected a vector type");

  // Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
  // type. This ensures they get CSE'd.
  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
  SDValue Vec;
  if (VT.getSizeInBits() == 64) // MMX
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  else // SSE
    Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}

/// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
/// that point to V2 point to its first element.
static SDValue NormalizeMask(SDValue Mask, SelectionDAG &DAG) {
  assert(Mask.getOpcode() == ISD::BUILD_VECTOR);

  bool Changed = false;
  SmallVector<SDValue, 8> MaskVec;
  unsigned NumElems = Mask.getNumOperands();
  for (unsigned i = 0; i != NumElems; ++i) {
    SDValue Arg = Mask.getOperand(i);
    if (Arg.getOpcode() != ISD::UNDEF) {
      unsigned Val = cast<ConstantSDNode>(Arg)->getZExtValue();
      if (Val > NumElems) {
        Arg = DAG.getConstant(NumElems, Arg.getValueType());
        Changed = true;
      }
    }
    MaskVec.push_back(Arg);
  }

  if (Changed)
    Mask = DAG.getNode(ISD::BUILD_VECTOR, Mask.getValueType(),
                       &MaskVec[0], MaskVec.size());
  return Mask;
}

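// Illustrative example (not from the original source): with NumElems == 4 and
// V2 a splat, a mask of <0,5,2,7> is rewritten to <0,4,2,4>; every index that
// points past V2's first element is redirected to index NumElems (V2[0]),
// while indices into V1 are left alone.
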
/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
/// operation of specified width.
static SDValue getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT BaseVT = MaskVT.getVectorElementType();

  SmallVector<SDValue, 8> MaskVec;
  MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

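// Illustrative example (not from the original source): getMOVLMask(4, DAG)
// builds the index vector <4,1,2,3>: element 0 of the result is taken from
// the second shuffle operand and the remaining elements come from the first,
// which is the MOVSS/MOVSD "replace the low element" pattern.
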
/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of specified width.
static SDValue getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT BaseVT = MaskVT.getVectorElementType();
  SmallVector<SDValue, 8> MaskVec;
  for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
    MaskVec.push_back(DAG.getConstant(i, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

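// Illustrative example (not from the original source): getUnpacklMask(4, DAG)
// yields <0,4,1,5>, interleaving the low halves of the two operands just as
// unpcklps/punpckldq do.
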
/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of specified width.
static SDValue getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT BaseVT = MaskVT.getVectorElementType();
  unsigned Half = NumElems/2;
  SmallVector<SDValue, 8> MaskVec;
  for (unsigned i = 0; i != Half; ++i) {
    MaskVec.push_back(DAG.getConstant(i + Half, BaseVT));
    MaskVec.push_back(DAG.getConstant(i + NumElems + Half, BaseVT));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

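// Illustrative example (not from the original source): getUnpackhMask(4, DAG)
// yields <2,6,3,7>, interleaving the high halves of the two operands just as
// unpckhps/punpckhdq do.
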
/// getSwapEltZeroMask - Returns a vector_shuffle mask for a shuffle that swaps
/// element #0 of a vector with the specified index, leaving the rest of the
/// elements in place.
static SDValue getSwapEltZeroMask(unsigned NumElems, unsigned DestElt,
                                  SelectionDAG &DAG) {
  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT BaseVT = MaskVT.getVectorElementType();
  SmallVector<SDValue, 8> MaskVec;
  // Element #0 of the result gets the elt we are replacing.
  MaskVec.push_back(DAG.getConstant(DestElt, BaseVT));
  for (unsigned i = 1; i != NumElems; ++i)
    MaskVec.push_back(DAG.getConstant(i == DestElt ? 0 : i, BaseVT));
  return DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], MaskVec.size());
}

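// Illustrative example (not from the original source): getSwapEltZeroMask(4, 2,
// DAG) produces <2,1,0,3>, i.e. elements 0 and 2 trade places while elements
// 1 and 3 stay put.
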
/// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32.
static SDValue PromoteSplat(SDValue Op, SelectionDAG &DAG, bool HasSSE2) {
  MVT PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32;
  MVT VT = Op.getValueType();
  if (PVT == VT)
    return Op;
  SDValue V1 = Op.getOperand(0);
  SDValue Mask = Op.getOperand(2);
  unsigned NumElems = Mask.getNumOperands();
  // Special handling of v4f32 -> v4i32.
  if (VT != MVT::v4f32) {
    Mask = getUnpacklMask(NumElems, DAG);
    while (NumElems > 4) {
      V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
      NumElems >>= 1;
    }
    Mask = getZeroVector(MVT::v4i32, true, DAG);
  }

  V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1);
  SDValue Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1,
                                DAG.getNode(ISD::UNDEF, PVT), Mask);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}

/// isVectorLoad - Returns true if the node is a vector load, a scalar
/// load that's promoted to a vector, or a bitcasted load.
static bool isVectorLoad(SDValue Op) {
  assert(Op.getValueType().isVector() && "Expected a vector type");
  if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR ||
      Op.getOpcode() == ISD::BIT_CONVERT) {
    return isa<LoadSDNode>(Op.getOperand(0));
  }
  return isa<LoadSDNode>(Op);
}

/// CanonicalizeMovddup - Canonicalize a movddup shuffle to v2f64.
///
static SDValue CanonicalizeMovddup(SDValue Op, SDValue V1, SDValue Mask,
                                   SelectionDAG &DAG, bool HasSSE3) {
  // If we have sse3 and shuffle has more than one use or input is a load, then
  // use movddup. Otherwise, use movlhps.
  bool UseMovddup = HasSSE3 && (!Op.hasOneUse() || isVectorLoad(V1));
  MVT PVT = UseMovddup ? MVT::v2f64 : MVT::v4f32;
  MVT VT = Op.getValueType();
  if (VT == PVT)
    return Op;
  unsigned NumElems = PVT.getVectorNumElements();
  if (NumElems == 2) {
    SDValue Cst = DAG.getTargetConstant(0, MVT::i32);
    Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
  } else {
    assert(NumElems == 4);
    SDValue Cst0 = DAG.getTargetConstant(0, MVT::i32);
    SDValue Cst1 = DAG.getTargetConstant(1, MVT::i32);
    Mask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst0, Cst1, Cst0, Cst1);
  }

  V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1);
  SDValue Shuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, PVT, V1,
                                DAG.getNode(ISD::UNDEF, PVT), Mask);
  return DAG.getNode(ISD::BIT_CONVERT, VT, Shuffle);
}

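// Illustrative note (not from the original source): for a splat of the low
// 64-bit element, the movddup form rewrites the shuffle over v2f64 with mask
// <0,0>, while the movlhps fallback rewrites it over v4f32 with mask
// <0,1,0,1>; both duplicate the low 64 bits into the high 64 bits.
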
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector and a zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. The resulting mask looks like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
                                           bool isZero, bool HasSSE2,
                                           SelectionDAG &DAG) {
  MVT VT = V2.getValueType();
  SDValue V1 = isZero
    ? getZeroVector(VT, HasSSE2, DAG) : DAG.getNode(ISD::UNDEF, VT);
  unsigned NumElems = V2.getValueType().getVectorNumElements();
  MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
  MVT EVT = MaskVT.getVectorElementType();
  SmallVector<SDValue, 16> MaskVec;
  for (unsigned i = 0; i != NumElems; ++i)
    if (i == Idx)  // If this is the insertion idx, put the low elt of V2 here.
      MaskVec.push_back(DAG.getConstant(NumElems, EVT));
    else
      MaskVec.push_back(DAG.getConstant(i, EVT));
  SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
                             &MaskVec[0], MaskVec.size());
  return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}

/// getNumOfConsecutiveZeros - Return the number of consecutive zero elements
/// in a shuffle result, counted from the low end (Low == true) or the high
/// end of the vector.
static
unsigned getNumOfConsecutiveZeros(SDValue Op, SDValue Mask,
                                  unsigned NumElems, bool Low,
                                  SelectionDAG &DAG) {
  unsigned NumZeros = 0;
  for (unsigned i = 0; i < NumElems; ++i) {
    unsigned Index = Low ? i : NumElems-i-1;
    SDValue Idx = Mask.getOperand(Index);
    if (Idx.getOpcode() == ISD::UNDEF) {
      ++NumZeros;
      continue;
    }
    SDValue Elt = DAG.getShuffleScalarElt(Op.getNode(), Index);
    if (Elt.getNode() && isZeroNode(Elt))
      ++NumZeros;
    else
      break;
  }
  return NumZeros;
}

/// isVectorShift - Returns true if the shuffle can be implemented as a
/// logical left or right shift of a vector.
static bool isVectorShift(SDValue Op, SDValue Mask, SelectionDAG &DAG,
                          bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
  unsigned NumElems = Mask.getNumOperands();

  isLeft = true;
  unsigned NumZeros = getNumOfConsecutiveZeros(Op, Mask, NumElems, true, DAG);
  if (!NumZeros) {
    isLeft = false;
    NumZeros = getNumOfConsecutiveZeros(Op, Mask, NumElems, false, DAG);
    if (!NumZeros)
      return false;
  }

  bool SeenV1 = false;
  bool SeenV2 = false;
  for (unsigned i = NumZeros; i < NumElems; ++i) {
    unsigned Val = isLeft ? (i - NumZeros) : i;
    SDValue Idx = Mask.getOperand(isLeft ? i : (i - NumZeros));
    if (Idx.getOpcode() == ISD::UNDEF)
      continue;
    unsigned Index = cast<ConstantSDNode>(Idx)->getZExtValue();
    if (Index < NumElems)
      SeenV1 = true;
    else {
      Index -= NumElems;
      SeenV2 = true;
    }
    if (Index != Val)
      return false;
  }
  if (SeenV1 && SeenV2)
    return false;

  ShVal = SeenV1 ? Op.getOperand(0) : Op.getOperand(1);
  ShAmt = NumZeros;
  return true;
}

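// Illustrative example (not from the original source): for a 4-element shuffle
// whose second operand is the all-zeros vector, the mask <4,4,0,1> is
// recognized here with isLeft == true and ShAmt == 2: the two low result
// elements are zero and the rest are V1's elements 0 and 1 in order, so the
// whole shuffle can be emitted as a vector logical shift of V1 by two elements.
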
/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG, TargetLowering &TLI) {
  if (NumNonZero > 8)
    return SDValue();

  SDValue V(0, 0);
  bool First = true;
  for (unsigned i = 0; i < 16; ++i) {
    bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
    if (ThisIsNonZero && First) {
      if (NumZero)
        V = getZeroVector(MVT::v8i16, true, DAG);
      else
        V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
      First = false;
    }

    if ((i & 1) != 0) {
      SDValue ThisElt(0, 0), LastElt(0, 0);
      bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
      if (LastIsNonZero) {
        LastElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i-1));
      }
      if (ThisIsNonZero) {
        ThisElt = DAG.getNode(ISD::ZERO_EXTEND, MVT::i16, Op.getOperand(i));
        ThisElt = DAG.getNode(ISD::SHL, MVT::i16,
                              ThisElt, DAG.getConstant(8, MVT::i8));
        if (LastIsNonZero)
          ThisElt = DAG.getNode(ISD::OR, MVT::i16, ThisElt, LastElt);
      } else
        ThisElt = LastElt;

      if (ThisElt.getNode())
        V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, ThisElt,
                        DAG.getIntPtrConstant(i/2));
    }
  }

  return DAG.getNode(ISD::BIT_CONVERT, MVT::v16i8, V);
}

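// Illustrative note (not from the original source): the loop above pairs the
// byte elements two at a time; for each odd i, bytes i-1 and i are
// zero-extended to i16, byte i is shifted left by 8 and OR'd in, and the
// combined halfword is inserted at element i/2 of a v8i16 that is finally
// bitcast back to v16i8. SSE2 has only a word-sized insert (pinsrw), not a
// byte-sized one, which is why the build happens at i16 granularity.
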
/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
|
2006-04-24 18:01:45 +00:00
|
|
|
///
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
|
2006-04-24 18:01:45 +00:00
|
|
|
unsigned NumNonZero, unsigned NumZero,
|
2006-09-08 06:48:29 +00:00
|
|
|
SelectionDAG &DAG, TargetLowering &TLI) {
|
2006-04-24 18:01:45 +00:00
|
|
|
if (NumNonZero > 4)
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2006-04-24 18:01:45 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue V(0, 0);
|
2006-04-24 18:01:45 +00:00
|
|
|
bool First = true;
|
|
|
|
for (unsigned i = 0; i < 8; ++i) {
|
|
|
|
bool isNonZero = (NonZeros & (1 << i)) != 0;
|
|
|
|
if (isNonZero) {
|
|
|
|
if (First) {
|
|
|
|
if (NumZero)
|
2008-05-15 08:39:06 +00:00
|
|
|
V = getZeroVector(MVT::v8i16, true, DAG);
|
2006-04-24 18:01:45 +00:00
|
|
|
else
|
|
|
|
V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
|
|
|
|
First = false;
|
|
|
|
}
|
|
|
|
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, V, Op.getOperand(i),
|
2008-01-17 07:00:52 +00:00
|
|
|
DAG.getIntPtrConstant(i));
|
2006-04-24 18:01:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return V;
|
|
|
|
}
|
|
|
|
|
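// Note: both build_vector helpers above only materialize a zero vector when
// NumZero is non-zero; otherwise the base is left undef, since any lane that
// is never explicitly inserted must have been undef in the original
// build_vector.
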
/// getVShift - Return a vector logical shift node.
///
static SDValue getVShift(bool isLeft, MVT VT, SDValue SrcOp,
                         unsigned NumBits, SelectionDAG &DAG,
                         const TargetLowering &TLI) {
  bool isMMX = VT.getSizeInBits() == 64;
  MVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
  unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
  SrcOp = DAG.getNode(ISD::BIT_CONVERT, ShVT, SrcOp);
  return DAG.getNode(ISD::BIT_CONVERT, VT,
                     DAG.getNode(Opc, ShVT, SrcOp,
                                 DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
}

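// Note: getVShift takes its shift amount in bits.  LowerBUILD_VECTOR below
// shifts by half the vector width (NumBits/2) to move a single scalar into
// the high element, and LowerVECTOR_SHUFFLE scales an element count by the
// element size in bits before calling it.
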
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
|
|
|
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
|
|
|
|
  // All zeros are handled with pxor; all ones are handled with pcmpeqd.
|
2008-08-28 23:19:51 +00:00
|
|
|
if (ISD::isBuildVectorAllZeros(Op.getNode())
|
|
|
|
|| ISD::isBuildVectorAllOnes(Op.getNode())) {
|
|
|
|
// Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to
|
|
|
|
// 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are
|
|
|
|
// eliminated on x86-32 hosts.
|
|
|
|
if (Op.getValueType() == MVT::v4i32 || Op.getValueType() == MVT::v2i32)
|
|
|
|
return Op;
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (ISD::isBuildVectorAllOnes(Op.getNode()))
|
|
|
|
return getOnesVector(Op.getValueType(), DAG);
|
2008-05-15 08:39:06 +00:00
|
|
|
return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG);
|
|
|
|
}
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
|
|
|
MVT EVT = VT.getVectorElementType();
|
|
|
|
unsigned EVTBits = EVT.getSizeInBits();
|
2006-04-25 20:13:52 +00:00
|
|
|
|
|
|
|
unsigned NumElems = Op.getNumOperands();
|
|
|
|
unsigned NumZero = 0;
|
|
|
|
unsigned NumNonZero = 0;
|
|
|
|
unsigned NonZeros = 0;
|
2008-03-08 22:48:29 +00:00
|
|
|
bool IsAllConstants = true;
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallSet<SDValue, 8> Values;
|
2006-04-25 20:13:52 +00:00
|
|
|
for (unsigned i = 0; i < NumElems; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = Op.getOperand(i);
|
2007-12-12 06:45:40 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF)
|
|
|
|
continue;
|
|
|
|
Values.insert(Elt);
|
|
|
|
if (Elt.getOpcode() != ISD::Constant &&
|
|
|
|
Elt.getOpcode() != ISD::ConstantFP)
|
2008-03-08 22:48:29 +00:00
|
|
|
IsAllConstants = false;
|
2007-12-12 06:45:40 +00:00
|
|
|
if (isZeroNode(Elt))
|
|
|
|
NumZero++;
|
|
|
|
else {
|
|
|
|
NonZeros |= (1 << i);
|
|
|
|
NumNonZero++;
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-06-25 16:23:39 +00:00
|
|
|
if (NumNonZero == 0) {
|
|
|
|
// All undef vector. Return an UNDEF. All zero vectors were handled above.
|
|
|
|
return DAG.getNode(ISD::UNDEF, VT);
|
2007-06-25 16:23:39 +00:00
|
|
|
}
|
2006-04-25 20:13:52 +00:00
|
|
|
|
|
|
|
// Special case for single non-zero, non-undef, element.
|
2007-12-12 06:45:40 +00:00
|
|
|
if (NumNonZero == 1 && NumElems <= 4) {
|
2006-04-25 20:13:52 +00:00
|
|
|
unsigned Idx = CountTrailingZeros_32(NonZeros);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Item = Op.getOperand(Idx);
|
|
|
|
// If this is an insertion of an i64 value on x86-32, and if the top bits of
|
|
|
|
// the value are obviously zero, truncate the value to i32 and do the
|
|
|
|
// insertion that way. Only do this if the value is non-constant or if the
|
|
|
|
// value is a constant being inserted into element 0. It is cheaper to do
|
|
|
|
// a constant pool load than it is to do a movd + shuffle.
|
|
|
|
if (EVT == MVT::i64 && !Subtarget->is64Bit() &&
|
|
|
|
(!IsAllConstants || Idx == 0)) {
|
|
|
|
if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
|
|
|
|
// Handle MMX and SSE both.
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
|
|
|
|
unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;
|
|
|
|
|
|
|
|
// Truncate the value (which may itself be a constant) to i32, and
|
|
|
|
// convert it to a vector with movd (S2V+shuffle to zero extend).
|
|
|
|
Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item);
|
|
|
|
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item);
|
2008-05-15 08:39:06 +00:00
|
|
|
Item = getShuffleVectorZeroOrUndef(Item, 0, true,
|
|
|
|
Subtarget->hasSSE2(), DAG);
|
|
|
|
|
|
|
|
// Now we have our 32-bit value zero extended in the low element of
|
|
|
|
// a vector. If Idx != 0, swizzle it into place.
|
|
|
|
if (Idx != 0) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] = {
|
|
|
|
Item, DAG.getNode(ISD::UNDEF, Item.getValueType()),
|
|
|
|
getSwapEltZeroMask(VecElts, Idx, DAG)
|
|
|
|
};
|
|
|
|
Item = DAG.getNode(ISD::VECTOR_SHUFFLE, VecVT, Ops, 3);
|
|
|
|
}
|
|
|
|
return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(), Item);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have a constant or non-constant insertion into the low element of
|
|
|
|
// a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
|
|
|
|
// the rest of the elements. This will be matched as movd/movq/movss/movsd
|
|
|
|
// depending on what the source datatype is. Because we can only get here
|
|
|
|
// when NumElems <= 4, this only needs to handle i32/f32/i64/f64.
|
|
|
|
if (Idx == 0 &&
|
|
|
|
// Don't do this for i64 values on x86-32.
|
|
|
|
(EVT != MVT::i64 || Subtarget->is64Bit())) {
|
2008-03-08 22:48:29 +00:00
|
|
|
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
|
2006-04-25 20:13:52 +00:00
|
|
|
// Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
|
2008-05-15 08:39:06 +00:00
|
|
|
return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
|
|
|
|
Subtarget->hasSSE2(), DAG);
|
2008-03-08 22:48:29 +00:00
|
|
|
}
|
2008-05-29 08:22:04 +00:00
|
|
|
|
|
|
|
// Is it a vector logical left shift?
|
|
|
|
if (NumElems == 2 && Idx == 1 &&
|
|
|
|
isZeroNode(Op.getOperand(0)) && !isZeroNode(Op.getOperand(1))) {
|
2008-06-06 12:08:01 +00:00
|
|
|
unsigned NumBits = VT.getSizeInBits();
|
2008-05-29 08:22:04 +00:00
|
|
|
return getVShift(true, VT,
|
|
|
|
DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(1)),
|
|
|
|
NumBits/2, DAG, *this);
|
|
|
|
}
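    // For example, a v2i64 build_vector <0, x> is handled by the shift case
    // above: it becomes scalar_to_vector(x) logically shifted left by 64 bits,
    // i.e. x moved into the high quadword with zeros shifted in below it.
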
|
2008-03-08 22:48:29 +00:00
|
|
|
|
|
|
|
if (IsAllConstants) // Otherwise, it's better to do a constpool load.
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
|
|
|
|
// Otherwise, if this is a vector with i32 or f32 elements, and the element
|
|
|
|
// is a non-constant being inserted into an element other than the low one,
|
|
|
|
// we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
|
|
|
|
// movd/movss) to move this into the low element, then shuffle it into
|
|
|
|
// place.
|
2006-04-25 20:13:52 +00:00
|
|
|
if (EVTBits == 32) {
|
2008-03-08 22:48:29 +00:00
|
|
|
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
|
|
|
|
|
2006-04-25 20:13:52 +00:00
|
|
|
// Turn it into a shuffle of zero and zero-extended scalar to vector.
|
2008-05-15 08:39:06 +00:00
|
|
|
Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
|
|
|
|
Subtarget->hasSSE2(), DAG);
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
|
|
|
|
MVT MaskEVT = MaskVT.getVectorElementType();
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MaskVec;
|
2006-04-25 20:13:52 +00:00
|
|
|
for (unsigned i = 0; i < NumElems; i++)
|
|
|
|
MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
2006-08-08 02:23:42 +00:00
|
|
|
&MaskVec[0], MaskVec.size());
|
2006-04-25 20:13:52 +00:00
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, Item,
|
|
|
|
DAG.getNode(ISD::UNDEF, VT), Mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Splat is obviously ok. Let legalizer expand it to a shuffle.
|
|
|
|
if (Values.size() == 1)
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
|
|
|
|
2007-07-24 22:55:08 +00:00
|
|
|
// A vector full of immediates; various special cases are already
|
|
|
|
// handled, so this is best done with a single constant-pool load.
|
2008-03-08 22:48:29 +00:00
|
|
|
if (IsAllConstants)
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2007-07-24 22:55:08 +00:00
|
|
|
|
|
|
|
// Let legalizer expand 2-wide build_vectors.
|
2008-05-08 00:57:18 +00:00
|
|
|
if (EVTBits == 64) {
|
|
|
|
if (NumNonZero == 1) {
|
|
|
|
// One half is zero or undef.
|
|
|
|
unsigned Idx = CountTrailingZeros_32(NonZeros);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT,
|
2008-05-08 00:57:18 +00:00
|
|
|
Op.getOperand(Idx));
|
2008-05-15 08:39:06 +00:00
|
|
|
return getShuffleVectorZeroOrUndef(V2, Idx, true,
|
|
|
|
Subtarget->hasSSE2(), DAG);
|
2008-05-08 00:57:18 +00:00
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-05-08 00:57:18 +00:00
|
|
|
}
|
2006-04-25 20:13:52 +00:00
|
|
|
|
|
|
|
// If element VT is < 32 bits, convert it to inserts into a zero vector.
|
2007-03-28 00:57:11 +00:00
|
|
|
if (EVTBits == 8 && NumElems == 16) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
|
2006-09-08 06:48:29 +00:00
|
|
|
*this);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (V.getNode()) return V;
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2007-03-28 00:57:11 +00:00
|
|
|
if (EVTBits == 16 && NumElems == 8) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
|
2006-09-08 06:48:29 +00:00
|
|
|
*this);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (V.getNode()) return V;
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// If element VT is == 32 bits, turn it into a number of shuffles.
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> V;
|
2007-02-25 07:10:00 +00:00
|
|
|
V.resize(NumElems);
|
2006-04-25 20:13:52 +00:00
|
|
|
if (NumElems == 4 && NumZero > 0) {
|
|
|
|
for (unsigned i = 0; i < 4; ++i) {
|
|
|
|
bool isZero = !(NonZeros & (1 << i));
|
|
|
|
if (isZero)
|
2008-05-15 08:39:06 +00:00
|
|
|
V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG);
|
2006-04-25 20:13:52 +00:00
|
|
|
else
|
|
|
|
V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < 2; ++i) {
|
|
|
|
switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
|
|
|
|
default: break;
|
|
|
|
case 0:
|
|
|
|
V[i] = V[i*2]; // Must be a zero vector.
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2+1], V[i*2],
|
|
|
|
getMOVLMask(NumElems, DAG));
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
|
|
|
|
getMOVLMask(NumElems, DAG));
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i*2], V[i*2+1],
|
|
|
|
getUnpacklMask(NumElems, DAG));
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
|
|
|
|
MVT EVT = MaskVT.getVectorElementType();
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MaskVec;
|
2006-04-25 20:13:52 +00:00
|
|
|
bool Reverse = (NonZeros & 0x3) == 2;
|
|
|
|
for (unsigned i = 0; i < 2; ++i)
|
|
|
|
if (Reverse)
|
|
|
|
MaskVec.push_back(DAG.getConstant(1-i, EVT));
|
|
|
|
else
|
|
|
|
MaskVec.push_back(DAG.getConstant(i, EVT));
|
|
|
|
Reverse = ((NonZeros & (0x3 << 2)) >> 2) == 2;
|
|
|
|
for (unsigned i = 0; i < 2; ++i)
|
|
|
|
if (Reverse)
|
|
|
|
MaskVec.push_back(DAG.getConstant(1-i+NumElems, EVT));
|
|
|
|
else
|
|
|
|
MaskVec.push_back(DAG.getConstant(i+NumElems, EVT));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
2006-08-11 17:38:39 +00:00
|
|
|
&MaskVec[0], MaskVec.size());
|
2006-04-25 20:13:52 +00:00
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[0], V[1], ShufMask);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (Values.size() > 2) {
|
|
|
|
// Expand into a number of unpckl*.
|
|
|
|
// e.g. for v4f32
|
|
|
|
// Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
|
|
|
|
// : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
|
|
|
|
// Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue UnpckMask = getUnpacklMask(NumElems, DAG);
|
2006-04-25 20:13:52 +00:00
|
|
|
for (unsigned i = 0; i < NumElems; ++i)
|
|
|
|
V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
|
|
|
|
NumElems >>= 1;
|
|
|
|
while (NumElems != 0) {
|
|
|
|
for (unsigned i = 0; i < NumElems; ++i)
|
|
|
|
V[i] = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V[i], V[i + NumElems],
|
|
|
|
UnpckMask);
|
|
|
|
NumElems >>= 1;
|
|
|
|
}
|
|
|
|
return V[0];
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
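// Summary of the BUILD_VECTOR lowering above: all-zero and all-ones vectors
// are canonicalized first; a single non-zero element becomes a
// movd/movss/movsd style insertion into a zero vector (or a vector shift);
// all-constant vectors are left for a single constant-pool load;
// 64-bit-element vectors are mostly left to the legalizer; v16i8 and v8i16
// are built with the insert-based helpers above; and the remaining
// 32-bit-element cases are assembled from scalar_to_vector nodes combined
// with movl/unpckl shuffles.
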
2007-12-07 08:07:39 +00:00
|
|
|
static
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue LowerVECTOR_SHUFFLEv8i16(SDValue V1, SDValue V2,
|
2008-08-21 22:35:37 +00:00
|
|
|
SDValue PermMask, SelectionDAG &DAG,
|
|
|
|
TargetLowering &TLI) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewV;
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT MaskVT = MVT::getIntVectorWithNumElements(8);
|
|
|
|
MVT MaskEVT = MaskVT.getVectorElementType();
|
|
|
|
MVT PtrVT = TLI.getPointerTy();
|
2008-08-28 21:40:38 +00:00
|
|
|
SmallVector<SDValue, 8> MaskElts(PermMask.getNode()->op_begin(),
|
|
|
|
PermMask.getNode()->op_end());
|
2007-12-11 01:46:18 +00:00
|
|
|
|
|
|
|
// First record which half of which vector the low elements come from.
|
|
|
|
SmallVector<unsigned, 4> LowQuad(4);
|
|
|
|
for (unsigned i = 0; i < 4; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = MaskElts[i];
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF)
|
|
|
|
continue;
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
int QuadIdx = EltIdx / 4;
|
|
|
|
++LowQuad[QuadIdx];
|
|
|
|
}
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
int BestLowQuad = -1;
|
|
|
|
unsigned MaxQuad = 1;
|
|
|
|
for (unsigned i = 0; i < 4; ++i) {
|
|
|
|
if (LowQuad[i] > MaxQuad) {
|
|
|
|
BestLowQuad = i;
|
|
|
|
MaxQuad = LowQuad[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Record which half of which vector the high elements come from.
|
|
|
|
SmallVector<unsigned, 4> HighQuad(4);
|
|
|
|
for (unsigned i = 4; i < 8; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = MaskElts[i];
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF)
|
|
|
|
continue;
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
int QuadIdx = EltIdx / 4;
|
|
|
|
++HighQuad[QuadIdx];
|
|
|
|
}
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
int BestHighQuad = -1;
|
|
|
|
MaxQuad = 1;
|
|
|
|
for (unsigned i = 0; i < 4; ++i) {
|
|
|
|
if (HighQuad[i] > MaxQuad) {
|
|
|
|
BestHighQuad = i;
|
|
|
|
MaxQuad = HighQuad[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If it's possible to sort parts of either half with PSHUF{H|L}W, then do it.
|
|
|
|
if (BestLowQuad != -1 || BestHighQuad != -1) {
|
|
|
|
// First sort the 4 chunks in order using shufpd.
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MaskVec;
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
if (BestLowQuad != -1)
|
|
|
|
MaskVec.push_back(DAG.getConstant(BestLowQuad, MVT::i32));
|
|
|
|
else
|
|
|
|
MaskVec.push_back(DAG.getConstant(0, MVT::i32));
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
if (BestHighQuad != -1)
|
|
|
|
MaskVec.push_back(DAG.getConstant(BestHighQuad, MVT::i32));
|
|
|
|
else
|
|
|
|
MaskVec.push_back(DAG.getConstant(1, MVT::i32));
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Mask= DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec[0],2);
|
2007-12-11 01:46:18 +00:00
|
|
|
NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
|
|
|
|
DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V1),
|
|
|
|
DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, V2), Mask);
|
|
|
|
NewV = DAG.getNode(ISD::BIT_CONVERT, MVT::v8i16, NewV);
|
|
|
|
|
|
|
|
// Now sort high and low parts separately.
|
|
|
|
BitVector InOrder(8);
|
|
|
|
if (BestLowQuad != -1) {
|
|
|
|
// Sort lower half in order using PSHUFLW.
|
|
|
|
MaskVec.clear();
|
|
|
|
bool AnyOutOrder = false;
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
for (unsigned i = 0; i != 4; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = MaskElts[i];
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF) {
|
|
|
|
MaskVec.push_back(Elt);
|
|
|
|
InOrder.set(i);
|
|
|
|
} else {
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
if (EltIdx != i)
|
|
|
|
AnyOutOrder = true;
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
MaskVec.push_back(DAG.getConstant(EltIdx % 4, MaskEVT));
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
// If this element is in the right place after this shuffle, then
|
|
|
|
// remember it.
|
|
|
|
if ((int)(EltIdx / 4) == BestLowQuad)
|
|
|
|
InOrder.set(i);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (AnyOutOrder) {
|
|
|
|
for (unsigned i = 4; i != 8; ++i)
|
|
|
|
MaskVec.push_back(DAG.getConstant(i, MaskEVT));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
|
2007-12-11 01:46:18 +00:00
|
|
|
NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (BestHighQuad != -1) {
|
|
|
|
// Sort high half in order using PSHUFHW if possible.
|
|
|
|
MaskVec.clear();
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
for (unsigned i = 0; i != 4; ++i)
|
|
|
|
MaskVec.push_back(DAG.getConstant(i, MaskEVT));
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
bool AnyOutOrder = false;
|
|
|
|
for (unsigned i = 4; i != 8; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = MaskElts[i];
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF) {
|
|
|
|
MaskVec.push_back(Elt);
|
|
|
|
InOrder.set(i);
|
|
|
|
} else {
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
if (EltIdx != i)
|
|
|
|
AnyOutOrder = true;
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
MaskVec.push_back(DAG.getConstant((EltIdx % 4) + 4, MaskEVT));
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
// If this element is in the right place after this shuffle, then
|
|
|
|
// remember it.
|
|
|
|
if ((int)(EltIdx / 4) == BestHighQuad)
|
|
|
|
InOrder.set(i);
|
|
|
|
}
|
|
|
|
}
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
if (AnyOutOrder) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
|
2007-12-11 01:46:18 +00:00
|
|
|
NewV = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, NewV, NewV, Mask);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The other elements are put in the right place using pextrw and pinsrw.
|
|
|
|
for (unsigned i = 0; i != 8; ++i) {
|
|
|
|
if (InOrder[i])
|
|
|
|
continue;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = MaskElts[i];
|
2008-08-21 22:36:36 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF)
|
|
|
|
continue;
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ExtOp = (EltIdx < 8)
|
2007-12-11 01:46:18 +00:00
|
|
|
? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
|
|
|
|
DAG.getConstant(EltIdx, PtrVT))
|
|
|
|
: DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
|
|
|
|
DAG.getConstant(EltIdx - 8, PtrVT));
|
|
|
|
NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
|
|
|
|
DAG.getConstant(i, PtrVT));
|
|
|
|
}
|
2008-08-21 22:35:37 +00:00
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
return NewV;
|
|
|
|
}
|
|
|
|
|
2008-08-21 22:35:37 +00:00
|
|
|
// PSHUF{H|L}W are not used. Lower into extracts and inserts but try to use as
|
|
|
|
// few as possible. First, let's find out how many elements are already in the
|
|
|
|
// right order.
|
2007-12-07 08:07:39 +00:00
|
|
|
unsigned V1InOrder = 0;
|
|
|
|
unsigned V1FromV1 = 0;
|
|
|
|
unsigned V2InOrder = 0;
|
|
|
|
unsigned V2FromV2 = 0;
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> V1Elts;
|
|
|
|
SmallVector<SDValue, 8> V2Elts;
|
2007-12-07 08:07:39 +00:00
|
|
|
for (unsigned i = 0; i < 8; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = MaskElts[i];
|
2007-12-07 08:07:39 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF) {
|
2007-12-11 01:46:18 +00:00
|
|
|
V1Elts.push_back(Elt);
|
|
|
|
V2Elts.push_back(Elt);
|
2007-12-07 08:07:39 +00:00
|
|
|
++V1InOrder;
|
|
|
|
++V2InOrder;
|
2007-12-11 01:46:18 +00:00
|
|
|
continue;
|
|
|
|
}
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
if (EltIdx == i) {
|
|
|
|
V1Elts.push_back(Elt);
|
|
|
|
V2Elts.push_back(DAG.getConstant(i+8, MaskEVT));
|
|
|
|
++V1InOrder;
|
|
|
|
} else if (EltIdx == i+8) {
|
|
|
|
V1Elts.push_back(Elt);
|
|
|
|
V2Elts.push_back(DAG.getConstant(i, MaskEVT));
|
|
|
|
++V2InOrder;
|
|
|
|
} else if (EltIdx < 8) {
|
|
|
|
V1Elts.push_back(Elt);
|
|
|
|
++V1FromV1;
|
2007-12-07 08:07:39 +00:00
|
|
|
} else {
|
2007-12-11 01:46:18 +00:00
|
|
|
V2Elts.push_back(DAG.getConstant(EltIdx-8, MaskEVT));
|
|
|
|
++V2FromV2;
|
2007-12-07 08:07:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (V2InOrder > V1InOrder) {
|
|
|
|
PermMask = CommuteVectorShuffleMask(PermMask, DAG);
|
|
|
|
std::swap(V1, V2);
|
|
|
|
std::swap(V1Elts, V2Elts);
|
|
|
|
std::swap(V1FromV1, V2FromV2);
|
|
|
|
}
|
|
|
|
|
2007-12-11 01:46:18 +00:00
|
|
|
if ((V1FromV1 + V1InOrder) != 8) {
|
|
|
|
// Some elements are from V2.
|
|
|
|
if (V1FromV1) {
|
|
|
|
// If there are elements that are from V1 but out of place,
|
|
|
|
// then first sort them in place
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MaskVec;
|
2007-12-11 01:46:18 +00:00
|
|
|
for (unsigned i = 0; i < 8; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = V1Elts[i];
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF) {
|
|
|
|
MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
|
|
|
|
continue;
|
|
|
|
}
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
if (EltIdx >= 8)
|
|
|
|
MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
|
|
|
|
else
|
|
|
|
MaskVec.push_back(DAG.getConstant(EltIdx, MaskEVT));
|
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &MaskVec[0], 8);
|
2007-12-11 01:46:18 +00:00
|
|
|
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v8i16, V1, V1, Mask);
|
|
|
|
}
|
|
|
|
|
|
|
|
NewV = V1;
|
2007-12-07 08:07:39 +00:00
|
|
|
for (unsigned i = 0; i < 8; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = V1Elts[i];
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF)
|
|
|
|
continue;
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
if (EltIdx < 8)
|
|
|
|
continue;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V2,
|
2007-12-11 01:46:18 +00:00
|
|
|
DAG.getConstant(EltIdx - 8, PtrVT));
|
|
|
|
NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
|
|
|
|
DAG.getConstant(i, PtrVT));
|
2007-12-07 08:07:39 +00:00
|
|
|
}
|
2007-12-11 01:46:18 +00:00
|
|
|
return NewV;
|
|
|
|
} else {
|
|
|
|
// All elements are from V1.
|
|
|
|
NewV = V1;
|
|
|
|
for (unsigned i = 0; i < 8; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = V1Elts[i];
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF)
|
|
|
|
continue;
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ExtOp = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
|
2007-12-11 01:46:18 +00:00
|
|
|
DAG.getConstant(EltIdx, PtrVT));
|
|
|
|
NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, MVT::v8i16, NewV, ExtOp,
|
|
|
|
DAG.getConstant(i, PtrVT));
|
|
|
|
}
|
|
|
|
return NewV;
|
2007-12-07 08:07:39 +00:00
|
|
|
}
|
2007-12-11 01:46:18 +00:00
|
|
|
}
|
2007-12-07 08:07:39 +00:00
|
|
|
|
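// Illustration of the PSHUF path above: when two or more of the low four mask
// entries come from the same quadword of the two sources, that quadword is
// first moved into the low half with a single v2i64 shuffle, the half is then
// reordered in place with a pshuflw-style shuffle (pshufhw for the high half),
// and any remaining out-of-place elements are patched individually with
// pextrw/pinsrw.
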
2007-12-15 03:00:47 +00:00
|
|
|
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
|
|
|
|
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
|
|
|
|
/// done when every pair / quad of shuffle mask elements point to elements in
|
|
|
|
/// the right sequence. e.g.
|
2007-12-11 01:46:18 +00:00
|
|
|
/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
|
|
|
|
static
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue RewriteAsNarrowerShuffle(SDValue V1, SDValue V2,
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT,
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue PermMask, SelectionDAG &DAG,
|
2007-12-11 01:46:18 +00:00
|
|
|
TargetLowering &TLI) {
|
|
|
|
unsigned NumElems = PermMask.getNumOperands();
|
2007-12-15 03:00:47 +00:00
|
|
|
unsigned NewWidth = (NumElems == 4) ? 2 : 4;
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
|
2008-07-21 10:20:31 +00:00
|
|
|
MVT MaskEltVT = MaskVT.getVectorElementType();
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT NewVT = MaskVT;
|
|
|
|
switch (VT.getSimpleVT()) {
|
|
|
|
default: assert(false && "Unexpected!");
|
2007-12-15 03:00:47 +00:00
|
|
|
case MVT::v4f32: NewVT = MVT::v2f64; break;
|
|
|
|
case MVT::v4i32: NewVT = MVT::v2i64; break;
|
|
|
|
case MVT::v8i16: NewVT = MVT::v4i32; break;
|
|
|
|
case MVT::v16i8: NewVT = MVT::v4i32; break;
|
|
|
|
}
|
|
|
|
|
2008-02-20 11:22:39 +00:00
|
|
|
if (NewWidth == 2) {
|
2008-06-06 12:08:01 +00:00
|
|
|
if (VT.isInteger())
|
2007-12-15 03:00:47 +00:00
|
|
|
NewVT = MVT::v2i64;
|
|
|
|
else
|
|
|
|
NewVT = MVT::v2f64;
|
2008-02-20 11:22:39 +00:00
|
|
|
}
|
2007-12-15 03:00:47 +00:00
|
|
|
unsigned Scale = NumElems / NewWidth;
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MaskVec;
|
2007-12-11 01:46:18 +00:00
|
|
|
for (unsigned i = 0; i < NumElems; i += Scale) {
|
|
|
|
unsigned StartIdx = ~0U;
|
|
|
|
for (unsigned j = 0; j < Scale; ++j) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = PermMask.getOperand(i+j);
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF)
|
|
|
|
continue;
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
if (StartIdx == ~0U)
|
|
|
|
StartIdx = EltIdx - (EltIdx % Scale);
|
|
|
|
if (EltIdx != StartIdx + j)
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
}
|
|
|
|
if (StartIdx == ~0U)
|
2008-07-21 10:20:31 +00:00
|
|
|
MaskVec.push_back(DAG.getNode(ISD::UNDEF, MaskEltVT));
|
2007-12-11 01:46:18 +00:00
|
|
|
else
|
2008-07-21 10:20:31 +00:00
|
|
|
MaskVec.push_back(DAG.getConstant(StartIdx / Scale, MaskEltVT));
|
2007-12-07 08:07:39 +00:00
|
|
|
}
|
2007-12-11 01:46:18 +00:00
|
|
|
|
2007-12-15 03:00:47 +00:00
|
|
|
V1 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V1);
|
|
|
|
V2 = DAG.getNode(ISD::BIT_CONVERT, NewVT, V2);
|
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, NewVT, V1, V2,
|
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
|
|
|
&MaskVec[0], MaskVec.size()));
|
2007-12-07 08:07:39 +00:00
|
|
|
}
|
|
|
|
|
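// Worked example: the v8i16 mask <0, 1, 10, 11, 4, 5, 14, 15> reads whole
// 32-bit pairs, so it is rewritten as the v4i32 shuffle <0, 5, 2, 7> on
// bitcast operands; a mask whose pairs straddle a 32-bit boundary makes this
// routine return SDValue() and falls back to the generic lowering.
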
2008-05-09 21:53:03 +00:00
|
|
|
/// getVZextMovL - Return a zero-extending vector move low node.
|
2008-05-08 00:57:18 +00:00
|
|
|
///
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue getVZextMovL(MVT VT, MVT OpVT,
|
|
|
|
SDValue SrcOp, SelectionDAG &DAG,
|
2008-06-06 12:08:01 +00:00
|
|
|
const X86Subtarget *Subtarget) {
|
2008-05-08 00:57:18 +00:00
|
|
|
if (VT == MVT::v2f64 || VT == MVT::v4f32) {
|
|
|
|
LoadSDNode *LD = NULL;
|
2008-08-28 21:40:38 +00:00
|
|
|
if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
|
2008-05-08 00:57:18 +00:00
|
|
|
LD = dyn_cast<LoadSDNode>(SrcOp);
|
|
|
|
if (!LD) {
|
|
|
|
// movssrr and movsdrr do not clear top bits. Try to use movd, movq
|
|
|
|
// instead.
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
|
2008-05-08 00:57:18 +00:00
|
|
|
if ((EVT != MVT::i64 || Subtarget->is64Bit()) &&
|
|
|
|
SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
|
|
|
|
SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
|
|
|
|
SrcOp.getOperand(0).getOperand(0).getValueType() == EVT) {
|
|
|
|
// PR2108
|
|
|
|
OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
|
|
|
|
return DAG.getNode(ISD::BIT_CONVERT, VT,
|
2008-05-09 21:53:03 +00:00
|
|
|
DAG.getNode(X86ISD::VZEXT_MOVL, OpVT,
|
2008-05-08 00:57:18 +00:00
|
|
|
DAG.getNode(ISD::SCALAR_TO_VECTOR, OpVT,
|
2008-08-28 23:19:51 +00:00
|
|
|
SrcOp.getOperand(0)
|
|
|
|
.getOperand(0))));
|
2008-05-08 00:57:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return DAG.getNode(ISD::BIT_CONVERT, VT,
|
2008-05-09 21:53:03 +00:00
|
|
|
DAG.getNode(X86ISD::VZEXT_MOVL, OpVT,
|
2008-05-08 00:57:18 +00:00
|
|
|
DAG.getNode(ISD::BIT_CONVERT, OpVT, SrcOp)));
|
|
|
|
}
|
|
|
|
|
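// In short: the node built above moves the low element of SrcOp into the low
// lane of the result and zeroes the remaining lanes.  The special case for
// v2f64/v4f32 exists because movss/movsd do not clear the upper lanes, so
// when the source is really a bitcast integer scalar the integer form
// (movd/movq) is preferred.
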
2008-07-22 21:13:36 +00:00
|
|
|
/// LowerVECTOR_SHUFFLE_4wide - Handle all 4 wide cases with a number of
|
|
|
|
/// shuffles.
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue
|
|
|
|
LowerVECTOR_SHUFFLE_4wide(SDValue V1, SDValue V2,
|
|
|
|
SDValue PermMask, MVT VT, SelectionDAG &DAG) {
|
2008-07-22 21:13:36 +00:00
|
|
|
MVT MaskVT = PermMask.getValueType();
|
|
|
|
MVT MaskEVT = MaskVT.getVectorElementType();
|
|
|
|
SmallVector<std::pair<int, int>, 8> Locs;
|
2008-08-28 18:32:53 +00:00
|
|
|
Locs.resize(4);
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> Mask1(4, DAG.getNode(ISD::UNDEF, MaskEVT));
|
2008-07-22 21:13:36 +00:00
|
|
|
unsigned NumHi = 0;
|
|
|
|
unsigned NumLo = 0;
|
|
|
|
for (unsigned i = 0; i != 4; ++i) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = PermMask.getOperand(i);
|
2008-07-22 21:13:36 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF) {
|
|
|
|
Locs[i] = std::make_pair(-1, -1);
|
|
|
|
} else {
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2008-08-04 23:09:15 +00:00
|
|
|
assert(Val < 8 && "Invalid VECTOR_SHUFFLE index!");
|
2008-07-22 21:13:36 +00:00
|
|
|
if (Val < 4) {
|
|
|
|
Locs[i] = std::make_pair(0, NumLo);
|
|
|
|
Mask1[NumLo] = Elt;
|
|
|
|
NumLo++;
|
|
|
|
} else {
|
|
|
|
Locs[i] = std::make_pair(1, NumHi);
|
|
|
|
if (2+NumHi < 4)
|
|
|
|
Mask1[2+NumHi] = Elt;
|
|
|
|
NumHi++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-07-23 00:22:17 +00:00
|
|
|
|
2008-07-22 21:13:36 +00:00
|
|
|
if (NumLo <= 2 && NumHi <= 2) {
|
2008-07-23 00:22:17 +00:00
|
|
|
    // No more than two elements come from either vector.  This can be
    // implemented with two shuffles: the first gathers the elements, and the
    // second, which takes the first shuffle as both of its vector operands,
    // puts the elements into the right order.
|
2008-07-22 21:13:36 +00:00
|
|
|
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
|
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
|
|
|
&Mask1[0], Mask1.size()));
|
2008-07-23 00:22:17 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> Mask2(4, DAG.getNode(ISD::UNDEF, MaskEVT));
|
2008-07-22 21:13:36 +00:00
|
|
|
for (unsigned i = 0; i != 4; ++i) {
|
|
|
|
if (Locs[i].first == -1)
|
|
|
|
continue;
|
|
|
|
else {
|
|
|
|
unsigned Idx = (i < 2) ? 0 : 4;
|
|
|
|
Idx += Locs[i].first * 2 + Locs[i].second;
|
|
|
|
Mask2[i] = DAG.getConstant(Idx, MaskEVT);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1,
|
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
|
|
|
&Mask2[0], Mask2.size()));
|
2008-07-23 00:22:17 +00:00
|
|
|
} else if (NumLo == 3 || NumHi == 3) {
|
|
|
|
// Otherwise, we must have three elements from one vector, call it X, and
|
|
|
|
// one element from the other, call it Y. First, use a shufps to build an
|
|
|
|
// intermediate vector with the one element from Y and the element from X
|
|
|
|
// that will be in the same half in the final destination (the indexes don't
|
|
|
|
// matter). Then, use a shufps to build the final vector, taking the half
|
|
|
|
// containing the element from Y from the intermediate, and the other half
|
|
|
|
// from X.
|
|
|
|
if (NumHi == 3) {
|
|
|
|
// Normalize it so the 3 elements come from V1.
|
|
|
|
PermMask = CommuteVectorShuffleMask(PermMask, DAG);
|
|
|
|
std::swap(V1, V2);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find the element from V2.
|
|
|
|
unsigned HiIndex;
|
|
|
|
for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = PermMask.getOperand(HiIndex);
|
2008-07-23 00:22:17 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF)
|
|
|
|
continue;
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Val = cast<ConstantSDNode>(Elt)->getZExtValue();
|
2008-07-23 00:22:17 +00:00
|
|
|
if (Val >= 4)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
Mask1[0] = PermMask.getOperand(HiIndex);
|
|
|
|
Mask1[1] = DAG.getNode(ISD::UNDEF, MaskEVT);
|
|
|
|
Mask1[2] = PermMask.getOperand(HiIndex^1);
|
|
|
|
Mask1[3] = DAG.getNode(ISD::UNDEF, MaskEVT);
|
|
|
|
V2 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
|
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &Mask1[0], 4));
|
|
|
|
|
|
|
|
if (HiIndex >= 2) {
|
|
|
|
Mask1[0] = PermMask.getOperand(0);
|
|
|
|
Mask1[1] = PermMask.getOperand(1);
|
|
|
|
Mask1[2] = DAG.getConstant(HiIndex & 1 ? 6 : 4, MaskEVT);
|
|
|
|
Mask1[3] = DAG.getConstant(HiIndex & 1 ? 4 : 6, MaskEVT);
|
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
|
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &Mask1[0], 4));
|
|
|
|
} else {
|
|
|
|
Mask1[0] = DAG.getConstant(HiIndex & 1 ? 2 : 0, MaskEVT);
|
|
|
|
Mask1[1] = DAG.getConstant(HiIndex & 1 ? 0 : 2, MaskEVT);
|
|
|
|
Mask1[2] = PermMask.getOperand(2);
|
|
|
|
Mask1[3] = PermMask.getOperand(3);
|
|
|
|
if (Mask1[2].getOpcode() != ISD::UNDEF)
|
2008-09-12 16:56:44 +00:00
|
|
|
Mask1[2] =
|
|
|
|
DAG.getConstant(cast<ConstantSDNode>(Mask1[2])->getZExtValue()+4,
|
|
|
|
MaskEVT);
|
2008-07-23 00:22:17 +00:00
|
|
|
if (Mask1[3].getOpcode() != ISD::UNDEF)
|
2008-09-12 16:56:44 +00:00
|
|
|
Mask1[3] =
|
|
|
|
DAG.getConstant(cast<ConstantSDNode>(Mask1[3])->getZExtValue()+4,
|
|
|
|
MaskEVT);
|
2008-07-23 00:22:17 +00:00
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V2, V1,
|
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT, &Mask1[0], 4));
|
|
|
|
}
|
2008-07-22 21:13:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Break it into (shuffle shuffle_hi, shuffle_lo).
|
|
|
|
Locs.clear();
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue,8> LoMask(4, DAG.getNode(ISD::UNDEF, MaskEVT));
|
|
|
|
SmallVector<SDValue,8> HiMask(4, DAG.getNode(ISD::UNDEF, MaskEVT));
|
|
|
|
SmallVector<SDValue,8> *MaskPtr = &LoMask;
|
2008-07-22 21:13:36 +00:00
|
|
|
unsigned MaskIdx = 0;
|
|
|
|
unsigned LoIdx = 0;
|
|
|
|
unsigned HiIdx = 2;
|
|
|
|
for (unsigned i = 0; i != 4; ++i) {
|
|
|
|
if (i == 2) {
|
|
|
|
MaskPtr = &HiMask;
|
|
|
|
MaskIdx = 1;
|
|
|
|
LoIdx = 0;
|
|
|
|
HiIdx = 2;
|
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Elt = PermMask.getOperand(i);
|
2008-07-22 21:13:36 +00:00
|
|
|
if (Elt.getOpcode() == ISD::UNDEF) {
|
|
|
|
Locs[i] = std::make_pair(-1, -1);
|
2008-09-12 16:56:44 +00:00
|
|
|
} else if (cast<ConstantSDNode>(Elt)->getZExtValue() < 4) {
|
2008-07-22 21:13:36 +00:00
|
|
|
Locs[i] = std::make_pair(MaskIdx, LoIdx);
|
|
|
|
(*MaskPtr)[LoIdx] = Elt;
|
|
|
|
LoIdx++;
|
|
|
|
} else {
|
|
|
|
Locs[i] = std::make_pair(MaskIdx, HiIdx);
|
|
|
|
(*MaskPtr)[HiIdx] = Elt;
|
|
|
|
HiIdx++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue LoShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
|
2008-07-22 21:13:36 +00:00
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
|
|
|
&LoMask[0], LoMask.size()));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue HiShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
|
2008-07-22 21:13:36 +00:00
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
|
|
|
&HiMask[0], HiMask.size()));
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MaskOps;
|
2008-07-22 21:13:36 +00:00
|
|
|
for (unsigned i = 0; i != 4; ++i) {
|
|
|
|
if (Locs[i].first == -1) {
|
|
|
|
MaskOps.push_back(DAG.getNode(ISD::UNDEF, MaskEVT));
|
|
|
|
} else {
|
|
|
|
unsigned Idx = Locs[i].first * 4 + Locs[i].second;
|
|
|
|
MaskOps.push_back(DAG.getConstant(Idx, MaskEVT));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, LoShuffle, HiShuffle,
|
|
|
|
DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
|
|
|
&MaskOps[0], MaskOps.size()));
|
|
|
|
}
|
|
|
|
|
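// For instance, the mask <0, 4, 1, 5> takes two elements from each source, so
// the code above first gathers them with one shuffle (mask <0, 1, 4, 5>) and
// then puts them in their final positions with a second shuffle of that
// result with itself (mask <0, 2, 5, 7>).
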
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
|
|
|
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
|
|
|
|
SDValue V1 = Op.getOperand(0);
|
|
|
|
SDValue V2 = Op.getOperand(1);
|
|
|
|
SDValue PermMask = Op.getOperand(2);
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
2006-04-25 20:13:52 +00:00
|
|
|
unsigned NumElems = PermMask.getNumOperands();
|
2008-06-06 12:08:01 +00:00
|
|
|
bool isMMX = VT.getSizeInBits() == 64;
|
2006-04-25 20:13:52 +00:00
|
|
|
bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
|
|
|
|
bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
|
2006-10-16 06:36:00 +00:00
|
|
|
bool V1IsSplat = false;
|
|
|
|
bool V2IsSplat = false;
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (isUndefShuffle(Op.getNode()))
|
2006-09-08 01:50:06 +00:00
|
|
|
return DAG.getNode(ISD::UNDEF, VT);
|
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (isZeroShuffle(Op.getNode()))
|
2008-05-15 08:39:06 +00:00
|
|
|
return getZeroVector(VT, Subtarget->hasSSE2(), DAG);
|
2007-05-17 18:45:50 +00:00
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (isIdentityMask(PermMask.getNode()))
|
2007-06-19 00:02:56 +00:00
|
|
|
return V1;
|
2008-08-28 21:40:38 +00:00
|
|
|
else if (isIdentityMask(PermMask.getNode(), true))
|
2007-06-19 00:02:56 +00:00
|
|
|
return V2;
|
|
|
|
|
2008-09-25 23:35:16 +00:00
|
|
|
// Canonicalize movddup shuffles.
|
|
|
|
if (V2IsUndef && Subtarget->hasSSE2() &&
|
2008-10-06 21:13:08 +00:00
|
|
|
VT.getSizeInBits() == 128 &&
|
2008-09-25 23:35:16 +00:00
|
|
|
X86::isMOVDDUPMask(PermMask.getNode()))
|
|
|
|
return CanonicalizeMovddup(Op, V1, PermMask, DAG, Subtarget->hasSSE3());
|
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (isSplatMask(PermMask.getNode())) {
|
2008-04-05 00:30:36 +00:00
|
|
|
if (isMMX || NumElems < 4) return Op;
|
|
|
|
// Promote it to a v4{if}32 splat.
|
|
|
|
return PromoteSplat(Op, DAG, Subtarget->hasSSE2());
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2007-12-15 03:00:47 +00:00
|
|
|
// If the shuffle can be profitably rewritten as a narrower shuffle, then
|
|
|
|
// do it!
|
|
|
|
if (VT == MVT::v8i16 || VT == MVT::v16i8) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask, DAG, *this);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (NewOp.getNode())
|
2007-12-15 03:00:47 +00:00
|
|
|
return DAG.getNode(ISD::BIT_CONVERT, VT, LowerVECTOR_SHUFFLE(NewOp, DAG));
|
|
|
|
} else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
|
|
|
|
// FIXME: Figure out a cleaner way to do this.
|
|
|
|
// Try to make use of movq to zero out the top part.
|
2008-08-28 21:40:38 +00:00
|
|
|
if (ISD::isBuildVectorAllZeros(V2.getNode())) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewOp = RewriteAsNarrowerShuffle(V1, V2, VT, PermMask,
|
2008-05-08 00:57:18 +00:00
|
|
|
DAG, *this);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (NewOp.getNode()) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewV1 = NewOp.getOperand(0);
|
|
|
|
SDValue NewV2 = NewOp.getOperand(1);
|
|
|
|
SDValue NewMask = NewOp.getOperand(2);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (isCommutedMOVL(NewMask.getNode(), true, false)) {
|
2007-12-15 03:00:47 +00:00
|
|
|
NewOp = CommuteVectorShuffle(NewOp, NewV1, NewV2, NewMask, DAG);
|
2008-05-09 21:53:03 +00:00
|
|
|
return getVZextMovL(VT, NewOp.getValueType(), NewV2, DAG, Subtarget);
|
2007-12-15 03:00:47 +00:00
|
|
|
}
|
|
|
|
}
|
2008-08-28 21:40:38 +00:00
|
|
|
} else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewOp= RewriteAsNarrowerShuffle(V1, V2, VT, PermMask,
|
2008-05-08 00:57:18 +00:00
|
|
|
DAG, *this);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (NewOp.getNode() && X86::isMOVLMask(NewOp.getOperand(2).getNode()))
|
2008-05-09 21:53:03 +00:00
|
|
|
return getVZextMovL(VT, NewOp.getValueType(), NewOp.getOperand(1),
|
2008-05-08 00:57:18 +00:00
|
|
|
DAG, Subtarget);
|
2007-12-15 03:00:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
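  // For v4i32 / v4f32 the narrowing rewrite above is only used to recognize a
  // movq-style zero extension: when one operand is an all-zeros vector and the
  // rewritten two-element mask is a (possibly commuted) MOVL, the whole
  // shuffle collapses into a single zero-extending move-low node via
  // getVZextMovL.
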
2008-05-29 08:22:04 +00:00
|
|
|
// Check if this can be converted into a logical shift.
|
|
|
|
bool isLeft = false;
|
|
|
|
unsigned ShAmt = 0;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ShVal;
|
2008-05-29 08:22:04 +00:00
|
|
|
bool isShift = isVectorShift(Op, PermMask, DAG, isLeft, ShVal, ShAmt);
|
|
|
|
if (isShift && ShVal.hasOneUse()) {
|
|
|
|
// If the shifted value has multiple uses, it may be cheaper to use
|
|
|
|
// v_set0 + movlhps or movhlps, etc.
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT EVT = VT.getVectorElementType();
|
|
|
|
ShAmt *= EVT.getSizeInBits();
|
2008-05-29 08:22:04 +00:00
|
|
|
return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this);
|
|
|
|
}
|
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (X86::isMOVLMask(PermMask.getNode())) {
|
2008-05-08 00:57:18 +00:00
|
|
|
if (V1IsUndef)
|
|
|
|
return V2;
|
2008-08-28 21:40:38 +00:00
|
|
|
if (ISD::isBuildVectorAllZeros(V1.getNode()))
|
2008-05-09 21:53:03 +00:00
|
|
|
return getVZextMovL(VT, VT, V2, DAG, Subtarget);
|
2008-07-25 19:05:58 +00:00
|
|
|
if (!isMMX)
|
|
|
|
return Op;
|
2008-05-08 00:57:18 +00:00
|
|
|
}
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (!isMMX && (X86::isMOVSHDUPMask(PermMask.getNode()) ||
|
|
|
|
X86::isMOVSLDUPMask(PermMask.getNode()) ||
|
|
|
|
X86::isMOVHLPSMask(PermMask.getNode()) ||
|
|
|
|
X86::isMOVHPMask(PermMask.getNode()) ||
|
|
|
|
X86::isMOVLPMask(PermMask.getNode())))
|
2006-10-25 20:48:19 +00:00
|
|
|
return Op;
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (ShouldXformToMOVHLPS(PermMask.getNode()) ||
|
|
|
|
ShouldXformToMOVLP(V1.getNode(), V2.getNode(), PermMask.getNode()))
|
2006-10-25 21:49:50 +00:00
|
|
|
return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
|
2006-10-25 20:48:19 +00:00
|
|
|
|
2008-05-29 08:22:04 +00:00
|
|
|
if (isShift) {
|
|
|
|
// No better options. Use a vshl / vsrl.
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT EVT = VT.getVectorElementType();
|
|
|
|
ShAmt *= EVT.getSizeInBits();
|
2008-05-29 08:22:04 +00:00
|
|
|
return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this);
|
|
|
|
}
|
|
|
|
|
2006-10-25 21:49:50 +00:00
|
|
|
bool Commuted = false;
|
|
|
|
// FIXME: This should also accept a bitcast of a splat? Be careful, not
|
|
|
|
// 1,1,1,1 -> v8i16 though.
|
2008-08-28 21:40:38 +00:00
|
|
|
V1IsSplat = isSplatVector(V1.getNode());
|
|
|
|
V2IsSplat = isSplatVector(V2.getNode());
// Canonicalize the splat or undef, if present, to be on the RHS.
|
2006-10-25 20:48:19 +00:00
|
|
|
if ((V1IsSplat || V1IsUndef) && !(V2IsSplat || V2IsUndef)) {
|
2006-10-25 21:49:50 +00:00
|
|
|
Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
|
2006-10-25 20:48:19 +00:00
|
|
|
std::swap(V1IsSplat, V2IsSplat);
|
|
|
|
std::swap(V1IsUndef, V2IsUndef);
|
2006-10-25 21:49:50 +00:00
|
|
|
Commuted = true;
|
2006-10-25 20:48:19 +00:00
|
|
|
}
|
|
|
|
|
2007-12-15 03:00:47 +00:00
|
|
|
// FIXME: Figure out a cleaner way to do this.
|
2008-08-28 21:40:38 +00:00
|
|
|
if (isCommutedMOVL(PermMask.getNode(), V2IsSplat, V2IsUndef)) {
|
2006-10-25 20:48:19 +00:00
|
|
|
if (V2IsUndef) return V1;
|
2006-10-25 21:49:50 +00:00
|
|
|
Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
|
2006-10-25 20:48:19 +00:00
|
|
|
if (V2IsSplat) {
|
|
|
|
// V2 is a splat, so the mask may be malformed. That is, it may point
|
|
|
|
      // to any V2 element. The instruction selector won't like this. Get
|
|
|
|
// a corrected mask and commute to form a proper MOVS{S|D}.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewMask = getMOVLMask(NumElems, DAG);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (NewMask.getNode() != PermMask.getNode())
|
2006-10-25 20:48:19 +00:00
|
|
|
Op = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
|
2006-10-04 18:33:38 +00:00
|
|
|
}
|
2006-10-25 20:48:19 +00:00
|
|
|
return Op;
|
2006-10-16 06:36:00 +00:00
|
|
|
}
|
2006-10-04 18:33:38 +00:00
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
if (X86::isUNPCKL_v_undef_Mask(PermMask.getNode()) ||
|
|
|
|
X86::isUNPCKH_v_undef_Mask(PermMask.getNode()) ||
|
|
|
|
X86::isUNPCKLMask(PermMask.getNode()) ||
|
|
|
|
X86::isUNPCKHMask(PermMask.getNode()))
|
2006-10-16 06:36:00 +00:00
|
|
|
return Op;
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2006-10-25 20:48:19 +00:00
|
|
|
if (V2IsSplat) {
|
|
|
|
    // Normalize mask so all entries that point to V2 point to its first
|
2006-11-21 00:01:06 +00:00
|
|
|
    // element, then try to match unpck{h|l} again. If it matches, return a
|
2006-10-25 20:48:19 +00:00
|
|
|
// new vector_shuffle with the corrected mask.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewMask = NormalizeMask(PermMask, DAG);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (NewMask.getNode() != PermMask.getNode()) {
|
|
|
|
if (X86::isUNPCKLMask(PermMask.getNode(), true)) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewMask = getUnpacklMask(NumElems, DAG);
|
2006-10-25 20:48:19 +00:00
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
|
2008-08-28 21:40:38 +00:00
|
|
|
} else if (X86::isUNPCKHMask(PermMask.getNode(), true)) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewMask = getUnpackhMask(NumElems, DAG);
|
2006-10-25 20:48:19 +00:00
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, NewMask);
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Normalize the node to match x86 shuffle ops if needed
|
2008-08-28 21:40:38 +00:00
|
|
|
if (V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(PermMask.getNode()))
|
2006-10-25 21:49:50 +00:00
|
|
|
Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
|
|
|
|
|
|
|
|
if (Commuted) {
|
|
|
|
    // Commute it back and try unpck* again.
|
|
|
|
Op = CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (X86::isUNPCKL_v_undef_Mask(PermMask.getNode()) ||
|
|
|
|
X86::isUNPCKH_v_undef_Mask(PermMask.getNode()) ||
|
|
|
|
X86::isUNPCKLMask(PermMask.getNode()) ||
|
|
|
|
X86::isUNPCKHMask(PermMask.getNode()))
|
2006-10-25 21:49:50 +00:00
|
|
|
return Op;
|
|
|
|
}
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2008-04-05 00:30:36 +00:00
|
|
|
// Try PSHUF* first, then SHUFP*.
|
|
|
|
// MMX doesn't have PSHUFD but it does have PSHUFW. While it's theoretically
|
|
|
|
// possible to shuffle a v2i32 using PSHUFW, that's not yet implemented.
|
2008-08-28 21:40:38 +00:00
|
|
|
if (isMMX && NumElems == 4 && X86::isPSHUFDMask(PermMask.getNode())) {
|
2008-04-05 00:30:36 +00:00
|
|
|
if (V2.getOpcode() != ISD::UNDEF)
|
|
|
|
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1,
|
|
|
|
DAG.getNode(ISD::UNDEF, VT), PermMask);
|
|
|
|
return Op;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!isMMX) {
|
|
|
|
if (Subtarget->hasSSE2() &&
|
2008-08-28 21:40:38 +00:00
|
|
|
(X86::isPSHUFDMask(PermMask.getNode()) ||
|
|
|
|
X86::isPSHUFHWMask(PermMask.getNode()) ||
|
|
|
|
X86::isPSHUFLWMask(PermMask.getNode()))) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT RVT = VT;
|
2008-04-05 00:30:36 +00:00
|
|
|
if (VT == MVT::v4f32) {
|
|
|
|
RVT = MVT::v4i32;
|
|
|
|
Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT,
|
|
|
|
DAG.getNode(ISD::BIT_CONVERT, RVT, V1),
|
|
|
|
DAG.getNode(ISD::UNDEF, RVT), PermMask);
|
|
|
|
} else if (V2.getOpcode() != ISD::UNDEF)
|
|
|
|
Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT, V1,
|
|
|
|
DAG.getNode(ISD::UNDEF, RVT), PermMask);
|
|
|
|
if (RVT != VT)
|
|
|
|
Op = DAG.getNode(ISD::BIT_CONVERT, VT, Op);
|
2006-04-25 20:13:52 +00:00
|
|
|
return Op;
|
|
|
|
}
|
|
|
|
|
2008-04-05 00:30:36 +00:00
|
|
|
// Binary or unary shufps.
|
2008-08-28 21:40:38 +00:00
|
|
|
if (X86::isSHUFPMask(PermMask.getNode()) ||
|
|
|
|
(V2.getOpcode() == ISD::UNDEF && X86::isPSHUFDMask(PermMask.getNode())))
|
2006-04-25 20:13:52 +00:00
|
|
|
return Op;
|
|
|
|
}
|
|
|
|
|
2007-12-07 08:07:39 +00:00
|
|
|
// Handle v8i16 specifically since SSE can do byte extraction and insertion.
|
2007-12-11 01:46:18 +00:00
|
|
|
if (VT == MVT::v8i16) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(V1, V2, PermMask, DAG, *this);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (NewOp.getNode())
|
2007-12-11 01:46:18 +00:00
|
|
|
return NewOp;
|
|
|
|
}
|
2007-12-07 08:07:39 +00:00
|
|
|
|
2008-07-22 21:13:36 +00:00
|
|
|
// Handle all 4 wide cases with a number of shuffles except for MMX.
|
|
|
|
if (NumElems == 4 && !isMMX)
|
|
|
|
return LowerVECTOR_SHUFFLE_4wide(V1, V2, PermMask, VT, DAG);
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
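/// LowerEXTRACT_VECTOR_ELT_SSE4 - Extract an element using the SSE4.1
/// PEXTRB/PEXTRW nodes for 8- and 16-bit elements, or an EXTRACTPS-style
/// extract for f32 when it is profitable (a single use that is a store or an
/// i32 bitcast). Returns an empty SDValue when no SSE4.1 form applies.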
SDValue
|
|
|
|
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
|
2008-02-11 04:19:36 +00:00
|
|
|
SelectionDAG &DAG) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
|
|
|
if (VT.getSizeInBits() == 8) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32,
|
2008-02-11 04:19:36 +00:00
|
|
|
Op.getOperand(0), Op.getOperand(1));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
|
2008-02-11 04:19:36 +00:00
|
|
|
DAG.getValueType(VT));
|
|
|
|
return DAG.getNode(ISD::TRUNCATE, VT, Assert);
|
2008-06-06 12:08:01 +00:00
|
|
|
} else if (VT.getSizeInBits() == 16) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32,
|
2008-02-11 04:19:36 +00:00
|
|
|
Op.getOperand(0), Op.getOperand(1));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
|
2008-02-11 04:19:36 +00:00
|
|
|
DAG.getValueType(VT));
|
|
|
|
return DAG.getNode(ISD::TRUNCATE, VT, Assert);
|
2008-03-24 21:52:23 +00:00
|
|
|
} else if (VT == MVT::f32) {
|
|
|
|
// EXTRACTPS outputs to a GPR32 register which will require a movd to copy
|
|
|
|
// the result back to FR32 register. It's only worth matching if the
|
2008-10-31 00:57:24 +00:00
|
|
|
// result has a single use which is a store or a bitcast to i32. And in
|
|
|
|
// the case of a store, it's not worth it if the index is a constant 0,
|
|
|
|
// because a MOVSSmr can be used instead, which is smaller and faster.
|
2008-03-24 21:52:23 +00:00
|
|
|
if (!Op.hasOneUse())
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-08-28 21:40:38 +00:00
|
|
|
SDNode *User = *Op.getNode()->use_begin();
|
2008-10-31 00:57:24 +00:00
|
|
|
if ((User->getOpcode() != ISD::STORE ||
|
|
|
|
(isa<ConstantSDNode>(Op.getOperand(1)) &&
|
|
|
|
cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
|
2008-04-16 02:32:24 +00:00
|
|
|
(User->getOpcode() != ISD::BIT_CONVERT ||
|
|
|
|
User->getValueType(0) != MVT::i32))
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
|
|
|
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
|
2008-03-24 21:52:23 +00:00
|
|
|
DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Op.getOperand(0)),
|
|
|
|
Op.getOperand(1));
|
|
|
|
return DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Extract);
|
2008-02-11 04:19:36 +00:00
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-02-11 04:19:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
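/// LowerEXTRACT_VECTOR_ELT - Lower extraction of a constant-indexed vector
/// element: PEXTRW for 16-bit elements, a SHUFPS-style shuffle plus a scalar
/// extract for 32-bit elements, and an unpckhpd-style shuffle for 64-bit
/// elements. SSE4.1 targets are tried via LowerEXTRACT_VECTOR_ELT_SSE4 first.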
SDValue
|
|
|
|
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
|
2006-04-25 20:13:52 +00:00
|
|
|
if (!isa<ConstantSDNode>(Op.getOperand(1)))
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2008-03-24 21:52:23 +00:00
|
|
|
if (Subtarget->hasSSE41()) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
|
2008-08-28 21:40:38 +00:00
|
|
|
if (Res.getNode())
|
2008-03-24 21:52:23 +00:00
|
|
|
return Res;
|
|
|
|
}
|
2008-02-11 04:19:36 +00:00
|
|
|
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
2006-04-25 20:13:52 +00:00
|
|
|
// TODO: handle v16i8.
|
2008-06-06 12:08:01 +00:00
|
|
|
if (VT.getSizeInBits() == 16) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Vec = Op.getOperand(0);
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
|
2007-12-11 01:46:18 +00:00
|
|
|
if (Idx == 0)
|
|
|
|
return DAG.getNode(ISD::TRUNCATE, MVT::i16,
|
|
|
|
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i32,
|
|
|
|
DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec),
|
|
|
|
Op.getOperand(1)));
|
2006-04-25 20:13:52 +00:00
|
|
|
    // Transform it so it matches pextrw, which produces a 32-bit result.
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT EVT = (MVT::SimpleValueType)(VT.getSimpleVT()+1);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
|
2006-04-25 20:13:52 +00:00
|
|
|
Op.getOperand(0), Op.getOperand(1));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
|
2006-04-25 20:13:52 +00:00
|
|
|
DAG.getValueType(VT));
|
|
|
|
return DAG.getNode(ISD::TRUNCATE, VT, Assert);
|
2008-06-06 12:08:01 +00:00
|
|
|
} else if (VT.getSizeInBits() == 32) {
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
if (Idx == 0)
|
|
|
|
return Op;
|
|
|
|
// SHUFPS the element to the lowest double word, then movss.
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT MaskVT = MVT::getIntVectorWithNumElements(4);
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> IdxVec;
|
2007-10-11 19:40:01 +00:00
|
|
|
IdxVec.
|
2008-06-06 12:08:01 +00:00
|
|
|
push_back(DAG.getConstant(Idx, MaskVT.getVectorElementType()));
|
2007-10-11 19:40:01 +00:00
|
|
|
IdxVec.
|
2008-06-06 12:08:01 +00:00
|
|
|
push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
|
2007-10-11 19:40:01 +00:00
|
|
|
IdxVec.
|
2008-06-06 12:08:01 +00:00
|
|
|
push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
|
2007-10-11 19:40:01 +00:00
|
|
|
IdxVec.
|
2008-06-06 12:08:01 +00:00
|
|
|
push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
2006-08-11 17:38:39 +00:00
|
|
|
&IdxVec[0], IdxVec.size());
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Vec = Op.getOperand(0);
|
2006-04-25 20:13:52 +00:00
|
|
|
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
|
2006-04-25 20:13:52 +00:00
|
|
|
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
|
2008-01-17 07:00:52 +00:00
|
|
|
DAG.getIntPtrConstant(0));
|
2008-06-06 12:08:01 +00:00
|
|
|
} else if (VT.getSizeInBits() == 64) {
|
2008-02-11 04:19:36 +00:00
|
|
|
// FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
|
|
|
|
// FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
|
|
|
|
// to match extract_elt for f64.
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
if (Idx == 0)
|
|
|
|
return Op;
|
|
|
|
|
|
|
|
// UNPCKHPD the element to the lowest double word, then movsd.
|
|
|
|
    // Note if the lower 64 bits of the result of the UNPCKHPD are then stored
|
|
|
|
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
|
2008-07-21 10:20:31 +00:00
|
|
|
MVT MaskVT = MVT::getIntVectorWithNumElements(2);
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> IdxVec;
|
2008-06-06 12:08:01 +00:00
|
|
|
IdxVec.push_back(DAG.getConstant(1, MaskVT.getVectorElementType()));
|
2007-10-11 19:40:01 +00:00
|
|
|
IdxVec.
|
2008-06-06 12:08:01 +00:00
|
|
|
push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
|
2006-08-11 17:38:39 +00:00
|
|
|
&IdxVec[0], IdxVec.size());
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Vec = Op.getOperand(0);
|
2006-04-25 20:13:52 +00:00
|
|
|
Vec = DAG.getNode(ISD::VECTOR_SHUFFLE, Vec.getValueType(),
|
|
|
|
Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
|
|
|
|
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
|
2008-01-17 07:00:52 +00:00
|
|
|
DAG.getIntPtrConstant(0));
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
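/// LowerINSERT_VECTOR_ELT_SSE4 - Insert an element with a constant index
/// using the SSE4.1 PINSRB/PINSRW nodes for 8- and 16-bit elements, or
/// INSERTPS for f32 elements.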
SDValue
|
|
|
|
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
|
|
|
MVT EVT = VT.getVectorElementType();
|
2008-02-11 04:19:36 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue N0 = Op.getOperand(0);
|
|
|
|
SDValue N1 = Op.getOperand(1);
|
|
|
|
SDValue N2 = Op.getOperand(2);
|
2008-02-11 04:19:36 +00:00
|
|
|
|
2008-08-14 22:53:18 +00:00
|
|
|
if ((EVT.getSizeInBits() == 8 || EVT.getSizeInBits() == 16) &&
|
|
|
|
isa<ConstantSDNode>(N2)) {
|
2008-06-06 12:08:01 +00:00
|
|
|
unsigned Opc = (EVT.getSizeInBits() == 8) ? X86ISD::PINSRB
|
2008-02-11 04:19:36 +00:00
|
|
|
: X86ISD::PINSRW;
|
|
|
|
    // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
|
|
|
|
// argument.
|
|
|
|
if (N1.getValueType() != MVT::i32)
|
|
|
|
N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
|
|
|
|
if (N2.getValueType() != MVT::i32)
|
2008-09-12 16:56:44 +00:00
|
|
|
N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
|
2008-02-11 04:19:36 +00:00
|
|
|
return DAG.getNode(Opc, VT, N0, N1, N2);
|
2008-08-14 22:43:26 +00:00
|
|
|
} else if (EVT == MVT::f32 && isa<ConstantSDNode>(N2)) {
|
2008-02-11 04:19:36 +00:00
|
|
|
// Bits [7:6] of the constant are the source select. This will always be
|
|
|
|
// zero here. The DAG Combiner may combine an extract_elt index into these
|
|
|
|
// bits. For example (insert (extract, 3), 2) could be matched by putting
|
|
|
|
// the '3' into bits [7:6] of X86ISD::INSERTPS.
|
|
|
|
// Bits [5:4] of the constant are the destination select. This is the
|
|
|
|
// value of the incoming immediate.
|
|
|
|
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
|
|
|
|
// combine either bitwise AND or insert of float 0.0 to set these bits.
|
2008-09-12 16:56:44 +00:00
|
|
|
N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue() << 4);
|
2008-02-11 04:19:36 +00:00
|
|
|
return DAG.getNode(X86ISD::INSERTPS, VT, N0, N1, N2);
|
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-02-11 04:19:36 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
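/// LowerINSERT_VECTOR_ELT - Insert an element into a vector. SSE4.1 targets
/// go through LowerINSERT_VECTOR_ELT_SSE4; otherwise only 16-bit elements are
/// handled directly (via PINSRW) and everything else is left to the generic
/// expansion by returning an empty SDValue.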
SDValue
|
|
|
|
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
|
|
|
MVT EVT = VT.getVectorElementType();
|
2008-02-11 04:19:36 +00:00
|
|
|
|
|
|
|
if (Subtarget->hasSSE41())
|
|
|
|
return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
|
|
|
|
|
2007-12-12 07:55:34 +00:00
|
|
|
if (EVT == MVT::i8)
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2007-12-12 07:55:34 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue N0 = Op.getOperand(0);
|
|
|
|
SDValue N1 = Op.getOperand(1);
|
|
|
|
SDValue N2 = Op.getOperand(2);
|
2007-12-12 07:55:34 +00:00
|
|
|
|
2008-06-06 12:08:01 +00:00
|
|
|
if (EVT.getSizeInBits() == 16) {
|
2007-12-12 07:55:34 +00:00
|
|
|
    // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
|
|
|
|
// as its second argument.
|
2006-04-25 20:13:52 +00:00
|
|
|
if (N1.getValueType() != MVT::i32)
|
|
|
|
N1 = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, N1);
|
|
|
|
if (N2.getValueType() != MVT::i32)
|
2008-09-12 16:56:44 +00:00
|
|
|
N2 = DAG.getIntPtrConstant(cast<ConstantSDNode>(N2)->getZExtValue());
|
2006-04-25 20:13:52 +00:00
|
|
|
return DAG.getNode(X86ISD::PINSRW, VT, N0, N1, N2);
|
|
|
|
}
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
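/// LowerSCALAR_TO_VECTOR - Normalize SCALAR_TO_VECTOR by any-extending the
/// scalar to i32 and building a v4i32 (or v2i32 for MMX-sized results)
/// SCALAR_TO_VECTOR, then bit-converting to the requested vector type.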
SDValue
|
|
|
|
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
|
2008-07-22 18:39:19 +00:00
|
|
|
if (Op.getValueType() == MVT::v2f32)
|
|
|
|
return DAG.getNode(ISD::BIT_CONVERT, MVT::v2f32,
|
|
|
|
DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2i32,
|
|
|
|
DAG.getNode(ISD::BIT_CONVERT, MVT::i32,
|
|
|
|
Op.getOperand(0))));
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = MVT::v2i32;
|
|
|
|
switch (Op.getValueType().getSimpleVT()) {
default: break;
|
|
|
|
case MVT::v16i8:
|
|
|
|
case MVT::v8i16:
|
|
|
|
VT = MVT::v4i32;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return DAG.getNode(ISD::BIT_CONVERT, Op.getValueType(),
|
|
|
|
DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, AnyExt));
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-09-16 21:48:12 +00:00
|
|
|
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
|
|
|
|
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
|
|
|
|
// one of the above mentioned nodes. It has to be wrapped because otherwise
|
|
|
|
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
|
|
|
|
// be used to form an addressing mode. These wrapped nodes will be selected
|
|
|
|
// into MOV32ri.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
|
|
|
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
|
2006-04-25 20:13:52 +00:00
|
|
|
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(),
|
2006-11-29 23:19:46 +00:00
|
|
|
getPointerTy(),
|
|
|
|
CP->getAlignment());
|
2006-12-05 04:01:03 +00:00
|
|
|
Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
|
2007-01-12 19:20:47 +00:00
|
|
|
// With PIC, the address is actually $g + Offset.
|
|
|
|
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
|
|
|
|
!Subtarget->isPICStyleRIPRel()) {
|
|
|
|
Result = DAG.getNode(ISD::ADD, getPointerTy(),
|
|
|
|
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
|
|
|
|
Result);
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
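/// LowerGlobalAddress - Lower a global address to a TargetGlobalAddress
/// wrapped in X86ISD::Wrapper. A constant offset is folded into the address
/// when that is legal, the PIC base register is added when needed, and an
/// extra load through the GOT is emitted for symbols that require it.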
SDValue
|
2008-09-24 00:05:32 +00:00
|
|
|
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
int64_t Offset,
|
2008-09-24 00:05:32 +00:00
|
|
|
SelectionDAG &DAG) const {
bool IsPic = getTargetMachine().getRelocationModel() == Reloc::PIC_;
|
|
|
|
bool ExtraLoadRequired =
|
|
|
|
Subtarget->GVRequiresExtraLoad(GV, getTargetMachine(), false);
|
|
|
|
|
|
|
|
// Create the TargetGlobalAddress node, folding in the constant
|
|
|
|
// offset if it is legal.
|
|
|
|
SDValue Result;
|
2008-10-21 03:38:42 +00:00
|
|
|
if (!IsPic && !ExtraLoadRequired && isInt32(Offset)) {
Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
|
|
|
|
Offset = 0;
|
|
|
|
} else
|
|
|
|
Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0);
|
2006-12-05 04:01:03 +00:00
|
|
|
Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (IsPic && !Subtarget->isPICStyleRIPRel()) {
|
2007-01-12 19:20:47 +00:00
|
|
|
Result = DAG.getNode(ISD::ADD, getPointerTy(),
|
|
|
|
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
|
|
|
|
Result);
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
2006-12-22 22:29:05 +00:00
|
|
|
|
|
|
|
// For Darwin & Mingw32, external and weak symbols are indirect, so we want to
|
|
|
|
// load the value at address GV, not the value of GV itself. This means that
|
|
|
|
// the GlobalAddress must be in the base or index register of the address, not
|
|
|
|
  // the GV offset field. The platform check is inside the GVRequiresExtraLoad() call.
|
2007-01-12 19:20:47 +00:00
|
|
|
// The same applies for external symbols during PIC codegen
if (ExtraLoadRequired)
|
2008-02-06 22:27:42 +00:00
|
|
|
Result = DAG.getLoad(getPointerTy(), DAG.getEntryNode(), Result,
|
2008-02-07 18:41:25 +00:00
|
|
|
PseudoSourceValue::getGOT(), 0);
// If there was a non-zero offset that we didn't fold, create an explicit
|
|
|
|
// addition for it.
|
|
|
|
if (Offset != 0)
|
|
|
|
Result = DAG.getNode(ISD::ADD, getPointerTy(), Result,
|
|
|
|
DAG.getConstant(Offset, getPointerTy()));
|
|
|
|
|
2006-04-25 20:13:52 +00:00
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2008-09-24 00:05:32 +00:00
|
|
|
SDValue
|
|
|
|
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) {
|
|
|
|
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
|
|
|
|
return LowerGlobalAddress(GV, Offset, DAG);
|
2008-09-24 00:05:32 +00:00
|
|
|
}
|
|
|
|
|
2008-05-04 21:36:32 +00:00
|
|
|
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue
|
2008-05-04 21:36:32 +00:00
|
|
|
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
|
2008-06-06 12:08:01 +00:00
|
|
|
const MVT PtrVT) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue InFlag;
|
|
|
|
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
|
2007-04-20 21:38:10 +00:00
|
|
|
DAG.getNode(X86ISD::GlobalBaseReg,
|
|
|
|
PtrVT), InFlag);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
|
|
|
|
// emit leal symbol@TLSGD(,%ebx,1), %eax
|
|
|
|
SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
|
2007-04-20 21:38:10 +00:00
|
|
|
GA->getValueType(0),
|
|
|
|
GA->getOffset());
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] = { Chain, TGA, InFlag };
|
|
|
|
SDValue Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 3);
|
2007-04-20 21:38:10 +00:00
|
|
|
InFlag = Result.getValue(2);
|
|
|
|
Chain = Result.getValue(1);
|
|
|
|
|
|
|
|
// call ___tls_get_addr. This function receives its argument in
|
|
|
|
// the register EAX.
|
|
|
|
Chain = DAG.getCopyToReg(Chain, X86::EAX, Result, InFlag);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
|
|
|
|
NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops1[] = { Chain,
|
2008-09-16 21:48:12 +00:00
|
|
|
DAG.getTargetExternalSymbol("___tls_get_addr",
|
|
|
|
PtrVT),
|
2007-04-20 21:38:10 +00:00
|
|
|
DAG.getRegister(X86::EAX, PtrVT),
|
|
|
|
DAG.getRegister(X86::EBX, PtrVT),
|
|
|
|
InFlag };
|
|
|
|
Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 5);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
|
|
|
|
return DAG.getCopyFromReg(Chain, X86::EAX, PtrVT, InFlag);
|
|
|
|
}
|
|
|
|
|
2008-05-04 21:36:32 +00:00
|
|
|
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue
|
2008-05-04 21:36:32 +00:00
|
|
|
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
|
2008-06-06 12:08:01 +00:00
|
|
|
const MVT PtrVT) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue InFlag, Chain;
|
2008-05-04 21:36:32 +00:00
|
|
|
|
|
|
|
// emit leaq symbol@TLSGD(%rip), %rdi
|
|
|
|
SDVTList NodeTys = DAG.getVTList(PtrVT, MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
|
2008-05-04 21:36:32 +00:00
|
|
|
GA->getValueType(0),
|
|
|
|
GA->getOffset());
|
2008-07-27 21:46:04 +00:00
|
|
|
  SDValue Ops[] = { DAG.getEntryNode(), TGA };
|
|
|
|
SDValue Result = DAG.getNode(X86ISD::TLSADDR, NodeTys, Ops, 2);
|
2008-05-04 21:36:32 +00:00
|
|
|
Chain = Result.getValue(1);
|
|
|
|
InFlag = Result.getValue(2);
|
|
|
|
|
2008-08-16 12:58:29 +00:00
|
|
|
// call __tls_get_addr. This function receives its argument in
|
2008-05-04 21:36:32 +00:00
|
|
|
// the register RDI.
|
|
|
|
Chain = DAG.getCopyToReg(Chain, X86::RDI, Result, InFlag);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
|
|
|
|
NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops1[] = { Chain,
|
2008-09-16 21:48:12 +00:00
|
|
|
DAG.getTargetExternalSymbol("__tls_get_addr",
|
|
|
|
PtrVT),
|
2008-05-04 21:36:32 +00:00
|
|
|
DAG.getRegister(X86::RDI, PtrVT),
|
|
|
|
InFlag };
|
|
|
|
Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops1, 4);
|
|
|
|
InFlag = Chain.getValue(1);
|
|
|
|
|
|
|
|
return DAG.getCopyFromReg(Chain, X86::RAX, PtrVT, InFlag);
|
|
|
|
}
|
|
|
|
|
2007-04-20 21:38:10 +00:00
|
|
|
// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
|
|
|
|
// "local exec" model.
|
2008-07-27 21:46:04 +00:00
|
|
|
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
|
2008-06-06 12:08:01 +00:00
|
|
|
const MVT PtrVT) {
|
2007-04-20 21:38:10 +00:00
|
|
|
// Get the Thread Pointer
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
|
2007-04-20 21:38:10 +00:00
|
|
|
// emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
|
|
|
|
// exec)
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
|
2007-04-20 21:38:10 +00:00
|
|
|
GA->getValueType(0),
|
|
|
|
GA->getOffset());
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Offset = DAG.getNode(X86ISD::Wrapper, PtrVT, TGA);
|
2007-04-22 22:50:52 +00:00
|
|
|
|
|
|
|
if (GA->getGlobal()->isDeclaration()) // initial exec TLS model
|
2008-02-06 22:27:42 +00:00
|
|
|
Offset = DAG.getLoad(PtrVT, DAG.getEntryNode(), Offset,
|
2008-02-07 18:41:25 +00:00
|
|
|
PseudoSourceValue::getGOT(), 0);
|
2007-04-22 22:50:52 +00:00
|
|
|
|
2007-04-20 21:38:10 +00:00
|
|
|
// The address of the thread local variable is the add of the thread
|
|
|
|
// pointer with the offset of the variable.
|
|
|
|
return DAG.getNode(ISD::ADD, PtrVT, ThreadPointer, Offset);
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
|
|
|
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
|
2007-04-20 21:38:10 +00:00
|
|
|
// TODO: implement the "local dynamic" model
|
2007-04-21 20:56:26 +00:00
|
|
|
  // TODO: implement the "initial exec" model for PIC executables
|
2008-05-04 21:36:32 +00:00
|
|
|
assert(Subtarget->isTargetELF() &&
|
|
|
|
"TLS not implemented for non-ELF targets");
|
2007-04-20 21:38:10 +00:00
|
|
|
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
|
|
|
|
// If the relocation model is PIC, use the "General Dynamic" TLS Model,
|
|
|
|
  // otherwise use the "Local Exec" TLS model.
|
2008-05-04 21:36:32 +00:00
|
|
|
if (Subtarget->is64Bit()) {
|
|
|
|
return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
|
|
|
|
} else {
|
|
|
|
if (getTargetMachine().getRelocationModel() == Reloc::PIC_)
|
|
|
|
return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
|
|
|
|
else
|
|
|
|
return LowerToTLSExecModel(GA, DAG, getPointerTy());
|
|
|
|
}
|
2007-04-20 21:38:10 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
|
|
|
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
|
2008-09-16 21:48:12 +00:00
|
|
|
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
|
|
|
|
SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
|
2006-12-05 04:01:03 +00:00
|
|
|
Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
|
2007-01-12 19:20:47 +00:00
|
|
|
// With PIC, the address is actually $g + Offset.
|
|
|
|
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
|
|
|
|
!Subtarget->isPICStyleRIPRel()) {
|
|
|
|
Result = DAG.getNode(ISD::ADD, getPointerTy(),
|
|
|
|
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
|
|
|
|
Result);
|
|
|
|
}
|
|
|
|
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
|
2007-01-12 19:20:47 +00:00
|
|
|
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy());
|
2007-01-12 19:20:47 +00:00
|
|
|
Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
|
|
|
|
// With PIC, the address is actually $g + Offset.
|
|
|
|
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
|
|
|
|
!Subtarget->isPICStyleRIPRel()) {
|
|
|
|
Result = DAG.getNode(ISD::ADD, getPointerTy(),
|
|
|
|
DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
|
|
|
|
Result);
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2007-10-17 06:02:13 +00:00
|
|
|
/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
|
|
|
|
/// take a 2 x i32 value to shift plus a shift amount.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
|
2008-03-03 22:22:09 +00:00
|
|
|
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
|
|
|
unsigned VTBits = VT.getSizeInBits();
|
2007-10-17 06:02:13 +00:00
|
|
|
bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ShOpLo = Op.getOperand(0);
|
|
|
|
SDValue ShOpHi = Op.getOperand(1);
|
|
|
|
SDValue ShAmt = Op.getOperand(2);
|
|
|
|
SDValue Tmp1 = isSRA ?
|
2008-03-03 22:22:09 +00:00
|
|
|
DAG.getNode(ISD::SRA, VT, ShOpHi, DAG.getConstant(VTBits - 1, MVT::i8)) :
|
|
|
|
DAG.getConstant(0, VT);
|
2007-10-17 06:02:13 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Tmp2, Tmp3;
|
2007-10-17 06:02:13 +00:00
|
|
|
if (Op.getOpcode() == ISD::SHL_PARTS) {
|
2008-03-03 22:22:09 +00:00
|
|
|
Tmp2 = DAG.getNode(X86ISD::SHLD, VT, ShOpHi, ShOpLo, ShAmt);
|
|
|
|
Tmp3 = DAG.getNode(ISD::SHL, VT, ShOpLo, ShAmt);
|
2007-10-17 06:02:13 +00:00
|
|
|
} else {
|
2008-03-03 22:22:09 +00:00
|
|
|
Tmp2 = DAG.getNode(X86ISD::SHRD, VT, ShOpLo, ShOpHi, ShAmt);
|
|
|
|
Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt);
|
2007-10-17 06:02:13 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
|
2008-03-03 22:22:09 +00:00
|
|
|
DAG.getConstant(VTBits, MVT::i8));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Cond = DAG.getNode(X86ISD::CMP, VT,
|
2007-10-17 06:02:13 +00:00
|
|
|
AndNode, DAG.getConstant(0, MVT::i8));
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Hi, Lo;
|
|
|
|
SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
|
|
|
|
SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
|
|
|
|
SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
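  // The SHLD/SHRD pair only covers shift amounts below VTBits; testing
  // ShAmt & VTBits and selecting with CMOVs handles the case where the
  // amount is VTBits or more and the result moves entirely into one half.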
|
2006-01-09 18:33:28 +00:00
|
|
|
|
2008-06-30 10:19:09 +00:00
|
|
|
if (Op.getOpcode() == ISD::SHL_PARTS) {
|
|
|
|
Hi = DAG.getNode(X86ISD::CMOV, VT, Ops0, 4);
|
|
|
|
Lo = DAG.getNode(X86ISD::CMOV, VT, Ops1, 4);
|
2007-10-17 06:02:13 +00:00
|
|
|
} else {
|
2008-06-30 10:19:09 +00:00
|
|
|
Lo = DAG.getNode(X86ISD::CMOV, VT, Ops0, 4);
|
|
|
|
Hi = DAG.getNode(X86ISD::CMOV, VT, Ops1, 4);
|
2007-10-17 06:02:13 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[2] = { Lo, Hi };
|
2008-07-02 17:40:58 +00:00
|
|
|
return DAG.getMergeValues(Ops, 2);
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
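/// LowerSINT_TO_FP - Lower a signed integer to floating point conversion by
/// spilling the integer to a stack slot and issuing an x87 FILD. When the
/// result is wanted in an SSE register, the FILD result is stored back to
/// memory and reloaded so it can leave the x87 stack.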
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT SrcVT = Op.getOperand(0).getValueType();
|
2008-06-08 20:54:56 +00:00
|
|
|
assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
|
2008-02-27 05:57:41 +00:00
|
|
|
"Unknown SINT_TO_FP to lower!");
|
|
|
|
|
|
|
|
// These are really Legal; caller falls through into that case.
|
|
|
|
if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-02-27 05:57:41 +00:00
|
|
|
if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 &&
|
|
|
|
Subtarget->is64Bit())
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-02-27 05:57:41 +00:00
|
|
|
|
2008-06-06 12:08:01 +00:00
|
|
|
unsigned Size = SrcVT.getSizeInBits()/8;
|
2006-04-25 20:13:52 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
|
|
|
|
SDValue Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
|
2008-02-06 22:27:42 +00:00
|
|
|
StackSlot,
|
2008-07-11 22:44:52 +00:00
|
|
|
PseudoSourceValue::getFixedStack(SSFI), 0);
|
2006-04-25 20:13:52 +00:00
|
|
|
|
|
|
|
// Build the FILD
|
2007-02-25 07:10:00 +00:00
|
|
|
SDVTList Tys;
|
2008-01-16 06:24:21 +00:00
|
|
|
bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
|
2007-09-14 22:26:36 +00:00
|
|
|
if (useSSE)
|
2007-02-25 07:10:00 +00:00
|
|
|
Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
|
|
|
|
else
|
2007-07-03 00:53:03 +00:00
|
|
|
Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> Ops;
|
2006-04-25 20:13:52 +00:00
|
|
|
Ops.push_back(Chain);
|
|
|
|
Ops.push_back(StackSlot);
|
|
|
|
Ops.push_back(DAG.getValueType(SrcVT));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD,
|
2008-02-27 05:57:41 +00:00
|
|
|
Tys, &Ops[0], Ops.size());
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2007-09-14 22:26:36 +00:00
|
|
|
if (useSSE) {
|
2006-04-25 20:13:52 +00:00
|
|
|
Chain = Result.getValue(1);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue InFlag = Result.getValue(2);
|
2006-04-25 20:13:52 +00:00
|
|
|
|
|
|
|
// FIXME: Currently the FST is flagged to the FILD_FLAG. This
|
|
|
|
// shouldn't be necessary except that RFP cannot be live across
|
|
|
|
// multiple blocks. When stackifier is fixed, they can be uncoupled.
|
2005-11-15 00:40:23 +00:00
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
2006-04-25 20:13:52 +00:00
|
|
|
int SSFI = MF.getFrameInfo()->CreateStackObject(8, 8);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
|
2007-02-25 07:10:00 +00:00
|
|
|
Tys = DAG.getVTList(MVT::Other);
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> Ops;
|
2006-01-12 22:54:21 +00:00
|
|
|
Ops.push_back(Chain);
|
2006-04-25 20:13:52 +00:00
|
|
|
Ops.push_back(Result);
|
2005-11-15 00:40:23 +00:00
|
|
|
Ops.push_back(StackSlot);
|
2006-04-25 20:13:52 +00:00
|
|
|
Ops.push_back(DAG.getValueType(Op.getValueType()));
|
|
|
|
Ops.push_back(InFlag);
|
2006-08-08 02:23:42 +00:00
|
|
|
Chain = DAG.getNode(X86ISD::FST, Tys, &Ops[0], Ops.size());
|
2008-02-06 22:27:42 +00:00
|
|
|
Result = DAG.getLoad(Op.getValueType(), Chain, StackSlot,
|
2008-07-11 22:44:52 +00:00
|
|
|
PseudoSourceValue::getFixedStack(SSFI), 0);
|
2005-11-15 00:40:23 +00:00
|
|
|
}
|
|
|
|
|
2006-04-25 20:13:52 +00:00
|
|
|
return Result;
|
|
|
|
}
|
|
|
|
|
2008-10-21 20:50:01 +00:00
|
|
|
SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
|
|
|
|
MVT SrcVT = Op.getOperand(0).getValueType();
|
|
|
|
assert(SrcVT.getSimpleVT() == MVT::i64 && "Unknown UINT_TO_FP to lower!");
|
|
|
|
|
|
|
|
// We only handle SSE2 f64 target here; caller can handle the rest.
|
|
|
|
if (Op.getValueType() != MVT::f64 || !X86ScalarSSEf64)
|
|
|
|
return SDValue();
|
|
|
|
|
2008-10-21 23:07:49 +00:00
|
|
|
// This algorithm is not obvious. Here it is in C code, more or less:
|
|
|
|
/*
|
|
|
|
double uint64_to_double( uint32_t hi, uint32_t lo )
|
|
|
|
{
|
|
|
|
static const __m128i exp = { 0x4330000045300000ULL, 0 };
|
|
|
|
static const __m128d bias = { 0x1.0p84, 0x1.0p52 };
|
|
|
|
|
2008-10-22 00:02:32 +00:00
|
|
|
// copy ints to xmm registers
|
2008-10-21 23:07:49 +00:00
|
|
|
__m128i xh = _mm_cvtsi32_si128( hi );
|
|
|
|
__m128i xl = _mm_cvtsi32_si128( lo );
|
|
|
|
|
2008-10-22 00:02:32 +00:00
|
|
|
// combine into low half of a single xmm register
|
2008-10-21 23:07:49 +00:00
|
|
|
__m128i x = _mm_unpacklo_epi32( xh, xl );
|
|
|
|
__m128d d;
|
|
|
|
double sd;
|
|
|
|
|
2008-10-22 00:02:32 +00:00
|
|
|
// merge in appropriate exponents to give the integer bits the
|
2008-10-21 23:07:49 +00:00
|
|
|
// right magnitude
|
|
|
|
x = _mm_unpacklo_epi32( x, exp );
|
|
|
|
|
2008-10-22 00:02:32 +00:00
|
|
|
// subtract away the biases to deal with the IEEE-754 double precision
|
|
|
|
// implicit 1
|
2008-10-21 23:07:49 +00:00
|
|
|
d = _mm_sub_pd( (__m128d) x, bias );
|
|
|
|
|
2008-10-22 00:02:32 +00:00
|
|
|
     // All conversions up to here are exact. The correctly rounded result is
     // calculated in the current rounding mode using the following horizontal add.
|
2008-10-21 23:07:49 +00:00
|
|
|
d = _mm_add_sd( d, _mm_unpackhi_pd( d, d ) );
|
|
|
|
     _mm_store_sd( &sd, d );   // since we are returning doubles in XMM, this
|
2008-10-22 00:02:32 +00:00
|
|
|
// store doesn't really need to be here (except maybe to zero the other
|
|
|
|
// double)
|
2008-10-21 23:07:49 +00:00
|
|
|
return sd;
|
|
|
|
}
|
|
|
|
*/
|
|
|
|
|
2008-10-21 20:50:01 +00:00
|
|
|
// Build some magic constants.
|
|
|
|
  std::vector<Constant*> CV0;
|
|
|
|
CV0.push_back(ConstantInt::get(APInt(32, 0x45300000)));
|
|
|
|
CV0.push_back(ConstantInt::get(APInt(32, 0x43300000)));
|
|
|
|
CV0.push_back(ConstantInt::get(APInt(32, 0)));
|
|
|
|
CV0.push_back(ConstantInt::get(APInt(32, 0)));
|
|
|
|
Constant *C0 = ConstantVector::get(CV0);
|
|
|
|
SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 4);
|
|
|
|
|
|
|
|
  std::vector<Constant*> CV1;
|
|
|
|
CV1.push_back(ConstantFP::get(APFloat(APInt(64, 0x4530000000000000ULL))));
|
|
|
|
CV1.push_back(ConstantFP::get(APFloat(APInt(64, 0x4330000000000000ULL))));
|
|
|
|
Constant *C1 = ConstantVector::get(CV1);
|
|
|
|
SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 4);
|
|
|
|
|
|
|
|
SmallVector<SDValue, 4> MaskVec;
|
|
|
|
MaskVec.push_back(DAG.getConstant(0, MVT::i32));
|
|
|
|
MaskVec.push_back(DAG.getConstant(4, MVT::i32));
|
|
|
|
MaskVec.push_back(DAG.getConstant(1, MVT::i32));
|
|
|
|
MaskVec.push_back(DAG.getConstant(5, MVT::i32));
|
|
|
|
SDValue UnpcklMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, &MaskVec[0],
|
|
|
|
MaskVec.size());
|
|
|
|
SmallVector<SDValue, 4> MaskVec2;
|
2008-10-22 11:24:12 +00:00
|
|
|
MaskVec2.push_back(DAG.getConstant(1, MVT::i32));
|
|
|
|
MaskVec2.push_back(DAG.getConstant(0, MVT::i32));
|
|
|
|
SDValue ShufMask = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, &MaskVec2[0],
|
2008-10-21 20:50:01 +00:00
|
|
|
MaskVec2.size());
|
|
|
|
|
|
|
|
SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4i32,
|
2008-10-22 11:24:12 +00:00
|
|
|
DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
|
|
|
|
Op.getOperand(0),
|
|
|
|
DAG.getIntPtrConstant(1)));
|
2008-10-21 20:50:01 +00:00
|
|
|
SDValue XR2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v4i32,
|
2008-10-22 11:24:12 +00:00
|
|
|
DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
|
|
|
|
Op.getOperand(0),
|
|
|
|
DAG.getIntPtrConstant(0)));
|
2008-10-21 20:50:01 +00:00
|
|
|
SDValue Unpck1 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32,
|
|
|
|
XR1, XR2, UnpcklMask);
|
|
|
|
SDValue CLod0 = DAG.getLoad(MVT::v4i32, DAG.getEntryNode(), CPIdx0,
|
|
|
|
PseudoSourceValue::getConstantPool(), 0, false, 16);
|
|
|
|
SDValue Unpck2 = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v4i32,
|
|
|
|
Unpck1, CLod0, UnpcklMask);
|
|
|
|
SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, MVT::v2f64, Unpck2);
|
|
|
|
SDValue CLod1 = DAG.getLoad(MVT::v2f64, CLod0.getValue(1), CPIdx1,
|
|
|
|
PseudoSourceValue::getConstantPool(), 0, false, 16);
|
|
|
|
SDValue Sub = DAG.getNode(ISD::FSUB, MVT::v2f64, XR2F, CLod1);
|
|
|
|
// Add the halves; easiest way is to swap them into another reg first.
|
|
|
|
SDValue Shuf = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2f64,
|
|
|
|
Sub, Sub, ShufMask);
|
|
|
|
SDValue Add = DAG.getNode(ISD::FADD, MVT::v2f64, Shuf, Sub);
|
|
|
|
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f64, Add,
|
|
|
|
DAG.getIntPtrConstant(0));
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
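/// FP_TO_SINTHelper - Build the FP_TO_INT*_IN_MEM sequence for an FP to
/// signed-integer conversion: the value is converted through a stack slot
/// using the x87 FIST family. Returns the FIST node and the stack slot, or a
/// pair of empty SDValues when the operation is already Legal.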
std::pair<SDValue,SDValue> X86TargetLowering::
|
|
|
|
FP_TO_SINTHelper(SDValue Op, SelectionDAG &DAG) {
|
2008-06-08 20:54:56 +00:00
|
|
|
assert(Op.getValueType().getSimpleVT() <= MVT::i64 &&
|
|
|
|
Op.getValueType().getSimpleVT() >= MVT::i16 &&
|
2006-04-25 20:13:52 +00:00
|
|
|
"Unknown FP_TO_SINT to lower!");
|
|
|
|
|
2007-09-14 22:26:36 +00:00
|
|
|
// These are really Legal.
|
2007-09-23 14:52:20 +00:00
|
|
|
if (Op.getValueType() == MVT::i32 &&
|
2008-01-16 06:24:21 +00:00
|
|
|
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
|
2008-07-27 21:46:04 +00:00
|
|
|
return std::make_pair(SDValue(), SDValue());
|
2007-09-19 23:55:34 +00:00
|
|
|
if (Subtarget->is64Bit() &&
|
|
|
|
Op.getValueType() == MVT::i64 &&
|
|
|
|
Op.getOperand(0).getValueType() != MVT::f80)
|
2008-07-27 21:46:04 +00:00
|
|
|
return std::make_pair(SDValue(), SDValue());
|
2007-09-14 22:26:36 +00:00
|
|
|
|
2007-10-15 20:11:21 +00:00
|
|
|
// We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
|
|
|
|
// stack slot.
|
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
2008-06-06 12:08:01 +00:00
|
|
|
unsigned MemSize = Op.getValueType().getSizeInBits()/8;
|
2007-10-15 20:11:21 +00:00
|
|
|
int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
|
2006-04-25 20:13:52 +00:00
|
|
|
unsigned Opc;
|
2008-06-06 12:08:01 +00:00
|
|
|
switch (Op.getValueType().getSimpleVT()) {
|
2007-11-24 07:07:01 +00:00
|
|
|
default: assert(0 && "Invalid FP_TO_SINT to lower!");
|
|
|
|
case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
|
|
|
|
case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
|
|
|
|
case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
|
2005-11-15 00:40:23 +00:00
|
|
|
}
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Chain = DAG.getEntryNode();
|
|
|
|
SDValue Value = Op.getOperand(0);
|
2008-01-16 06:24:21 +00:00
|
|
|
if (isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType())) {
|
2006-04-25 20:13:52 +00:00
|
|
|
assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
|
2008-02-06 22:27:42 +00:00
|
|
|
Chain = DAG.getStore(Chain, Value, StackSlot,
|
2008-07-11 22:44:52 +00:00
|
|
|
PseudoSourceValue::getFixedStack(SSFI), 0);
|
2007-07-03 00:53:03 +00:00
|
|
|
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] = {
|
2007-02-25 07:10:00 +00:00
|
|
|
Chain, StackSlot, DAG.getValueType(Op.getOperand(0).getValueType())
|
|
|
|
};
|
|
|
|
Value = DAG.getNode(X86ISD::FLD, Tys, Ops, 3);
|
2006-04-25 20:13:52 +00:00
|
|
|
Chain = Value.getValue(1);
|
|
|
|
SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
|
|
|
|
StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
|
2005-11-20 21:41:10 +00:00
|
|
|
}
|
2006-01-06 00:43:03 +00:00
|
|
|
|
2006-04-25 20:13:52 +00:00
|
|
|
// Build the FP_TO_INT*_IN_MEM
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] = { Chain, Value, StackSlot };
|
|
|
|
SDValue FIST = DAG.getNode(Opc, MVT::Other, Ops, 3);
|
2006-04-25 20:13:52 +00:00
|
|
|
|
2007-11-24 07:07:01 +00:00
|
|
|
return std::make_pair(FIST, StackSlot);
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
|
|
|
|
std::pair<SDValue,SDValue> Vals = FP_TO_SINTHelper(Op, DAG);
|
|
|
|
SDValue FIST = Vals.first, StackSlot = Vals.second;
|
2008-08-28 21:40:38 +00:00
|
|
|
if (FIST.getNode() == 0) return SDValue();
|
2007-10-17 06:17:29 +00:00
|
|
|
|
2007-11-24 07:07:01 +00:00
|
|
|
// Load the result.
|
|
|
|
return DAG.getLoad(Op.getValueType(), FIST, StackSlot, NULL, 0);
|
2006-04-25 20:13:52 +00:00
|
|
|
}
SDNode *X86TargetLowering::ExpandFP_TO_SINT(SDNode *N, SelectionDAG &DAG) {
  std::pair<SDValue,SDValue> Vals = FP_TO_SINTHelper(SDValue(N, 0), DAG);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  if (FIST.getNode() == 0) return 0;

  MVT VT = N->getValueType(0);

  // Return a load from the stack slot.
  SDValue Res = DAG.getLoad(VT, FIST, StackSlot, NULL, 0);

  // Use MERGE_VALUES to drop the chain result value and get a node with one
  // result.  This requires turning off getMergeValues simplification, since
  // otherwise it will give us Res back.
  return DAG.getMergeValues(&Res, 1, false).getNode();
}

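// FABS is lowered to a bitwise AND that clears the sign bit, using a mask
// loaded from the constant pool: ~(1 << 63) == 0x7FFFFFFFFFFFFFFF for f64 and
// ~(1 << 31) == 0x7FFFFFFF for f32, replicated across the lanes so the same
// 16-byte constant serves scalar and packed operands.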
SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  MVT EltVT = VT;
  if (VT.isVector())
    EltVT = VT.getVectorElementType();
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63))));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(APFloat(APInt(32, ~(1U << 31))));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDValue Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                             PseudoSourceValue::getConstantPool(), 0,
                             false, 16);
  return DAG.getNode(X86ISD::FAND, VT, Op.getOperand(0), Mask);
}

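// FNEG is the same trick with XOR instead of AND: flipping the sign bit with
// 1 << 63 (f64) or 1 << 31 (f32) negates the value without touching the
// exponent or mantissa bits.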
SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getValueType();
  MVT EltVT = VT;
  unsigned EltNum = 1;
  if (VT.isVector()) {
    EltVT = VT.getVectorElementType();
    EltNum = VT.getVectorNumElements();
  }
  std::vector<Constant*> CV;
  if (EltVT == MVT::f64) {
    Constant *C = ConstantFP::get(APFloat(APInt(64, 1ULL << 63)));
    CV.push_back(C);
    CV.push_back(C);
  } else {
    Constant *C = ConstantFP::get(APFloat(APInt(32, 1U << 31)));
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
    CV.push_back(C);
  }
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDValue Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                             PseudoSourceValue::getConstantPool(), 0,
                             false, 16);
  if (VT.isVector()) {
    return DAG.getNode(ISD::BIT_CONVERT, VT,
                       DAG.getNode(ISD::XOR, MVT::v2i64,
                         DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64,
                                     Op.getOperand(0)),
                         DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Mask)));
  } else {
    return DAG.getNode(X86ISD::FXOR, VT, Op.getOperand(0), Mask);
  }
}

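// FCOPYSIGN(x, y) is computed bitwise: AND y with a sign-bit-only mask to
// extract its sign, AND x with the complementary mask to clear its sign, and
// OR the two results.  For example, copysign(1.0, -2.0) yields -1.0, since
// only the extracted sign bit of -2.0 is merged into the cleared bits of 1.0.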
SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  MVT VT = Op.getValueType();
  MVT SrcVT = Op1.getValueType();

  // If second operand is smaller, extend it first.
  if (SrcVT.bitsLT(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
    SrcVT = VT;
  }
  // And if it is bigger, shrink it first.
  if (SrcVT.bitsGT(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1));
    SrcVT = VT;
  }

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.

  // First get the sign bit of second operand.
  std::vector<Constant*> CV;
  if (SrcVT == MVT::f64) {
    CV.push_back(ConstantFP::get(APFloat(APInt(64, 1ULL << 63))));
    CV.push_back(ConstantFP::get(APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 1U << 31))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
  }
  Constant *C = ConstantVector::get(CV);
  SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDValue Mask1 = DAG.getLoad(SrcVT, DAG.getEntryNode(), CPIdx,
                              PseudoSourceValue::getConstantPool(), 0,
                              false, 16);
  SDValue SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1);

  // Shift sign bit right or left if the two operands have different types.
  if (SrcVT.bitsGT(VT)) {
    // Op0 is MVT::f32, Op1 is MVT::f64.
    SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit);
    SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit,
                          DAG.getConstant(32, MVT::i32));
    SignBit = DAG.getNode(ISD::BIT_CONVERT, MVT::v4f32, SignBit);
    SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::f32, SignBit,
                          DAG.getIntPtrConstant(0));
  }

  // Clear first operand sign bit.
  CV.clear();
  if (VT == MVT::f64) {
    CV.push_back(ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63)))));
    CV.push_back(ConstantFP::get(APFloat(APInt(64, 0))));
  } else {
    CV.push_back(ConstantFP::get(APFloat(APInt(32, ~(1U << 31)))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
    CV.push_back(ConstantFP::get(APFloat(APInt(32, 0))));
  }
  C = ConstantVector::get(CV);
  CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
  SDValue Mask2 = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
                              PseudoSourceValue::getConstantPool(), 0,
                              false, 16);
  SDValue Val = DAG.getNode(X86ISD::FAND, VT, Op0, Mask2);

  // Or the value with the sign bit.
  return DAG.getNode(X86ISD::FOR, VT, Val, SignBit);
}

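// Scalar SETCC is lowered to an X86ISD::CMP that sets EFLAGS, followed by an
// X86ISD::SETCC keyed by the translated x86 condition code.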
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Cond;
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
  unsigned X86CC;

  if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
                     Op0, Op1, DAG)) {
    Cond = DAG.getNode(X86ISD::CMP, MVT::i32, Op0, Op1);
    return DAG.getNode(X86ISD::SETCC, MVT::i8,
                       DAG.getConstant(X86CC, MVT::i8), Cond);
  }

  assert(0 && "Illegal SetCC!");
  return SDValue();
}

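// Vector compares: FP comparisons map onto cmpps/cmppd with an immediate
// predicate (0=eq, 1=lt, 2=le, 3=unord, 4=neq, 5=nlt, 6=nle, 7=ord), swapping
// operands where needed.  Integer comparisons only have PCMPEQ* and PCMPGT*,
// which are signed, so unsigned predicates are handled by XORing the sign bit
// into both operands first; e.g. for i32,
//   (a u> b)  ==  ((a ^ 0x80000000) s> (b ^ 0x80000000)).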
SDValue X86TargetLowering::LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
  SDValue Cond;
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  MVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();

  if (isFP) {
    unsigned SSECC = 8;
    MVT VT0 = Op0.getValueType();
    assert(VT0 == MVT::v4f32 || VT0 == MVT::v2f64);
    unsigned Opc = VT0 == MVT::v4f32 ? X86ISD::CMPPS : X86ISD::CMPPD;
    bool Swap = false;

    switch (SetCCOpcode) {
    default: break;
    case ISD::SETOEQ:
    case ISD::SETEQ:  SSECC = 0; break;
    case ISD::SETOGT:
    case ISD::SETGT:  Swap = true; // Fallthrough
    case ISD::SETLT:
    case ISD::SETOLT: SSECC = 1; break;
    case ISD::SETOGE:
    case ISD::SETGE:  Swap = true; // Fallthrough
    case ISD::SETLE:
    case ISD::SETOLE: SSECC = 2; break;
    case ISD::SETUO:  SSECC = 3; break;
    case ISD::SETUNE:
    case ISD::SETNE:  SSECC = 4; break;
    case ISD::SETULE: Swap = true;
    case ISD::SETUGE: SSECC = 5; break;
    case ISD::SETULT: Swap = true;
    case ISD::SETUGT: SSECC = 6; break;
    case ISD::SETO:   SSECC = 7; break;
    }
    if (Swap)
      std::swap(Op0, Op1);

    // In the two special cases we can't handle, emit two comparisons.
    if (SSECC == 8) {
      if (SetCCOpcode == ISD::SETUEQ) {
        SDValue UNORD, EQ;
        UNORD = DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(3, MVT::i8));
        EQ = DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(0, MVT::i8));
        return DAG.getNode(ISD::OR, VT, UNORD, EQ);
      } else if (SetCCOpcode == ISD::SETONE) {
        SDValue ORD, NEQ;
        ORD = DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(7, MVT::i8));
        NEQ = DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(4, MVT::i8));
        return DAG.getNode(ISD::AND, VT, ORD, NEQ);
      }
      assert(0 && "Illegal FP comparison");
    }
    // Handle all other FP comparisons here.
    return DAG.getNode(Opc, VT, Op0, Op1, DAG.getConstant(SSECC, MVT::i8));
  }

  // We are handling one of the integer comparisons here.  Since SSE only has
  // GT and EQ comparisons for integer, swapping operands and multiple
  // operations may be required for some comparisons.
  unsigned Opc = 0, EQOpc = 0, GTOpc = 0;
  bool Swap = false, Invert = false, FlipSigns = false;

  switch (VT.getSimpleVT()) {
  default: break;
  case MVT::v16i8: EQOpc = X86ISD::PCMPEQB; GTOpc = X86ISD::PCMPGTB; break;
  case MVT::v8i16: EQOpc = X86ISD::PCMPEQW; GTOpc = X86ISD::PCMPGTW; break;
  case MVT::v4i32: EQOpc = X86ISD::PCMPEQD; GTOpc = X86ISD::PCMPGTD; break;
  case MVT::v2i64: EQOpc = X86ISD::PCMPEQQ; GTOpc = X86ISD::PCMPGTQ; break;
  }

  switch (SetCCOpcode) {
  default: break;
  case ISD::SETNE:  Invert = true;
  case ISD::SETEQ:  Opc = EQOpc; break;
  case ISD::SETLT:  Swap = true;
  case ISD::SETGT:  Opc = GTOpc; break;
  case ISD::SETGE:  Swap = true;
  case ISD::SETLE:  Opc = GTOpc; Invert = true; break;
  case ISD::SETULT: Swap = true;
  case ISD::SETUGT: Opc = GTOpc; FlipSigns = true; break;
  case ISD::SETUGE: Swap = true;
  case ISD::SETULE: Opc = GTOpc; FlipSigns = true; Invert = true; break;
  }
  if (Swap)
    std::swap(Op0, Op1);

  // Since SSE has no unsigned integer comparisons, we need to flip the sign
  // bits of the inputs before performing those operations.
  if (FlipSigns) {
    MVT EltVT = VT.getVectorElementType();
    SDValue SignBit = DAG.getConstant(EltVT.getIntegerVTSignBit(), EltVT);
    std::vector<SDValue> SignBits(VT.getVectorNumElements(), SignBit);
    SDValue SignVec = DAG.getNode(ISD::BUILD_VECTOR, VT, &SignBits[0],
                                  SignBits.size());
    Op0 = DAG.getNode(ISD::XOR, VT, Op0, SignVec);
    Op1 = DAG.getNode(ISD::XOR, VT, Op1, SignVec);
  }

  SDValue Result = DAG.getNode(Opc, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert) {
    MVT EltVT = VT.getVectorElementType();
    SDValue NegOne = DAG.getConstant(EltVT.getIntegerVTBitMask(), EltVT);
    std::vector<SDValue> NegOnes(VT.getVectorNumElements(), NegOne);
    SDValue NegOneV = DAG.getNode(ISD::BUILD_VECTOR, VT, &NegOnes[0],
                                  NegOnes.size());
    Result = DAG.getNode(ISD::XOR, VT, Result, NegOneV);
  }
  return Result;
}

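// SELECT becomes an X86ISD::CMOV.  If the condition already comes from a
// CMP/COMI/UCOMI whose flags can be reused, they are used directly; otherwise
// the condition value is compared against zero and COND_NE is used.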
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDValue Cond  = Op.getOperand(0);
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG);

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    MVT VT = Op.getValueType();

    bool IllegalFPCMov = false;
    if (VT.isFloatingPoint() && !VT.isVector() &&
        !isScalarFPTypeInSSEReg(VT))  // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());

    if ((Opc == X86ISD::CMP ||
         Opc == X86ISD::COMI ||
         Opc == X86ISD::UCOMI) && !IllegalFPCMov) {
      Cond = Cmp;
      addTest = false;
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8));
  }

  const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(),
                                         MVT::Flag);
  SmallVector<SDValue, 4> Ops;
  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // condition is true.
  Ops.push_back(Op.getOperand(2));
  Ops.push_back(Op.getOperand(1));
  Ops.push_back(CC);
  Ops.push_back(Cond);
  return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}

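// BRCOND follows the same pattern as LowerSELECT, with extra handling for the
// OR/AND of two SETCCs produced by FCMP_UNE and FCMP_OEQ: those are emitted
// as two conditional branches (e.g. "jne; jp" rather than
// "setne; setp; orb; jne"), which uses fewer instructions and registers at
// the cost of an extra branch.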
SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) {
  bool addTest = true;
  SDValue Chain = Op.getOperand(0);
  SDValue Cond  = Op.getOperand(1);
  SDValue Dest  = Op.getOperand(2);
  SDValue CC;

  if (Cond.getOpcode() == ISD::SETCC)
    Cond = LowerSETCC(Cond, DAG);

  // If condition flag is set by a X86ISD::CMP, then use it as the condition
  // setting operand in place of the X86ISD::SETCC.
  if (Cond.getOpcode() == X86ISD::SETCC) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Opc == X86ISD::CMP ||
        Opc == X86ISD::COMI ||
        Opc == X86ISD::UCOMI) {
      Cond = Cmp;
      addTest = false;
    }
  // Also, recognize the pattern generated by an FCMP_UNE. We can emit
  // two branches instead of an explicit OR instruction with a
  // separate test.
  } else if (Cond.getOpcode() == ISD::OR &&
             Cond.hasOneUse() &&
             Cond.getOperand(0).getOpcode() == X86ISD::SETCC &&
             Cond.getOperand(0).hasOneUse() &&
             Cond.getOperand(1).getOpcode() == X86ISD::SETCC &&
             Cond.getOperand(1).hasOneUse()) {
    SDValue Cmp = Cond.getOperand(0).getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Cmp == Cond.getOperand(1).getOperand(1) &&
        (Opc == X86ISD::CMP ||
         Opc == X86ISD::COMI ||
         Opc == X86ISD::UCOMI)) {
      CC = Cond.getOperand(0).getOperand(0);
      Chain = DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                          Chain, Dest, CC, Cmp);
      CC = Cond.getOperand(1).getOperand(0);
      Cond = Cmp;
      addTest = false;
    }
  // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
  // two branches instead of an explicit AND instruction with a
  // separate test. However, we only do this if this block doesn't
  // have a fall-through edge, because this requires an explicit
  // jmp when the condition is false.
  } else if (Cond.getOpcode() == ISD::AND &&
             Cond.hasOneUse() &&
             Cond.getOperand(0).getOpcode() == X86ISD::SETCC &&
             Cond.getOperand(0).hasOneUse() &&
             Cond.getOperand(1).getOpcode() == X86ISD::SETCC &&
             Cond.getOperand(1).hasOneUse()) {
    SDValue Cmp = Cond.getOperand(0).getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    if (Cmp == Cond.getOperand(1).getOperand(1) &&
        (Opc == X86ISD::CMP ||
         Opc == X86ISD::COMI ||
         Opc == X86ISD::UCOMI) &&
        Op.getNode()->hasOneUse()) {
      X86::CondCode CCode =
        (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
      CCode = X86::GetOppositeBranchCondition(CCode);
      CC = DAG.getConstant(CCode, MVT::i8);
      SDValue User = SDValue(*Op.getNode()->use_begin(), 0);
      // Look for an unconditional branch following this conditional branch.
      // We need this because we need to reverse the successors in order
      // to implement FCMP_OEQ.
      if (User.getOpcode() == ISD::BR) {
        SDValue FalseBB = User.getOperand(1);
        SDValue NewBR =
          DAG.UpdateNodeOperands(User, User.getOperand(0), Dest);
        assert(NewBR == User);
        Dest = FalseBB;

        Chain = DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                            Chain, Dest, CC, Cmp);
        X86::CondCode CCode =
          (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
        CCode = X86::GetOppositeBranchCondition(CCode);
        CC = DAG.getConstant(CCode, MVT::i8);
        Cond = Cmp;
        addTest = false;
      }
    }
  }

  if (addTest) {
    CC = DAG.getConstant(X86::COND_NE, MVT::i8);
    Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8));
  }
  return DAG.getNode(X86ISD::BRCOND, Op.getValueType(),
                     Chain, Dest, CC, Cond);
}

// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4k
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) {
  assert(Subtarget->isTargetCygMing() &&
         "This should be used only on Cygwin/Mingw targets");

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);
  // FIXME: Ensure alignment here

  SDValue Flag;

  MVT IntPtr = getPointerTy();
  MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true));

  Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SDValue Ops[] = { Chain,
                    DAG.getTargetExternalSymbol("_alloca", IntPtr),
                    DAG.getRegister(X86::EAX, IntPtr),
                    DAG.getRegister(X86StackPtr, SPTy),
                    Flag };
  Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 5);
  Flag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getIntPtrConstant(0, true),
                             DAG.getIntPtrConstant(0, true),
                             Flag);

  Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);

  SDValue Ops1[2] = { Chain.getValue(0), Chain };
  return DAG.getMergeValues(Ops1, 2);
}

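// Inline memset is emitted as "rep stos{b,w,d,q}".  When the stored value is
// a constant byte it is replicated to fill the widest store unit the
// alignment allows, e.g. 0x41 becomes 0x41414141 for a DWORD-aligned rep
// stosd; any 1-7 trailing bytes are finished with a smaller, recursive
// memset.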
SDValue
X86TargetLowering::EmitTargetCodeForMemset(SelectionDAG &DAG,
                                           SDValue Chain,
                                           SDValue Dst, SDValue Src,
                                           SDValue Size, unsigned Align,
                                           const Value *DstSV,
                                           uint64_t DstSVOff) {
  ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);

  // If not DWORD aligned or size is more than the threshold, call the library.
  // The libc version is likely to be faster for these cases. It can use the
  // address value and run time information about the CPU.
  if ((Align & 3) != 0 ||
      !ConstantSize ||
      ConstantSize->getZExtValue() >
        getSubtarget()->getMaxInlineSizeThreshold()) {
    SDValue InFlag(0, 0);

    // Check to see if there is a specialized entry-point for memory zeroing.
    ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);

    if (const char *bzeroEntry = V &&
        V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
      MVT IntPtr = getPointerTy();
      const Type *IntPtrTy = TD->getIntPtrType();
      TargetLowering::ArgListTy Args;
      TargetLowering::ArgListEntry Entry;
      Entry.Node = Dst;
      Entry.Ty = IntPtrTy;
      Args.push_back(Entry);
      Entry.Node = Size;
      Args.push_back(Entry);
      std::pair<SDValue,SDValue> CallResult =
        LowerCallTo(Chain, Type::VoidTy, false, false, false, false,
                    CallingConv::C, false,
                    DAG.getExternalSymbol(bzeroEntry, IntPtr), Args, DAG);
      return CallResult.second;
    }

    // Otherwise have the target-independent code call memset.
    return SDValue();
  }

  uint64_t SizeVal = ConstantSize->getZExtValue();
  SDValue InFlag(0, 0);
  MVT AVT;
  SDValue Count;
  ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src);
  unsigned BytesLeft = 0;
  bool TwoRepStos = false;
  if (ValC) {
    unsigned ValReg;
    uint64_t Val = ValC->getZExtValue() & 255;

    // If the value is a constant, then we can potentially use larger sets.
    switch (Align & 3) {
    case 2:   // WORD aligned
      AVT = MVT::i16;
      ValReg = X86::AX;
      Val = (Val << 8) | Val;
      break;
    case 0:   // DWORD aligned
      AVT = MVT::i32;
      ValReg = X86::EAX;
      Val = (Val << 8)  | Val;
      Val = (Val << 16) | Val;
      if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) {  // QWORD aligned
        AVT = MVT::i64;
        ValReg = X86::RAX;
        Val = (Val << 32) | Val;
      }
      break;
    default:  // Byte aligned
      AVT = MVT::i8;
      ValReg = X86::AL;
      Count = DAG.getIntPtrConstant(SizeVal);
      break;
    }

    if (AVT.bitsGT(MVT::i8)) {
      unsigned UBytes = AVT.getSizeInBits() / 8;
      Count = DAG.getIntPtrConstant(SizeVal / UBytes);
      BytesLeft = SizeVal % UBytes;
    }

    Chain  = DAG.getCopyToReg(Chain, ValReg, DAG.getConstant(Val, AVT),
                              InFlag);
    InFlag = Chain.getValue(1);
  } else {
    AVT = MVT::i8;
    Count  = DAG.getIntPtrConstant(SizeVal);
    Chain  = DAG.getCopyToReg(Chain, X86::AL, Src, InFlag);
    InFlag = Chain.getValue(1);
  }

  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
                            Count, InFlag);
  InFlag = Chain.getValue(1);
  Chain  = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
                            Dst, InFlag);
  InFlag = Chain.getValue(1);

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(DAG.getValueType(AVT));
  Ops.push_back(InFlag);
  Chain  = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());

  if (TwoRepStos) {
    InFlag = Chain.getValue(1);
    Count  = Size;
    MVT CVT = Count.getValueType();
    SDValue Left = DAG.getNode(ISD::AND, CVT, Count,
                               DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
    Chain  = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
                              Left, InFlag);
    InFlag = Chain.getValue(1);
    Tys = DAG.getVTList(MVT::Other, MVT::Flag);
    Ops.clear();
    Ops.push_back(Chain);
    Ops.push_back(DAG.getValueType(MVT::i8));
    Ops.push_back(InFlag);
    Chain  = DAG.getNode(X86ISD::REP_STOS, Tys, &Ops[0], Ops.size());
  } else if (BytesLeft) {
    // Handle the last 1 - 7 bytes.
    unsigned Offset = SizeVal - BytesLeft;
    MVT AddrVT = Dst.getValueType();
    MVT SizeVT = Size.getValueType();

    Chain = DAG.getMemset(Chain,
                          DAG.getNode(ISD::ADD, AddrVT, Dst,
                                      DAG.getConstant(Offset, AddrVT)),
                          Src,
                          DAG.getConstant(BytesLeft, SizeVT),
                          Align, DstSV, DstSVOff + Offset);
  }

  // TODO: Use a Tokenfactor, as in memcpy, instead of a single chain.
  return Chain;
}

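// Inline memcpy mirrors the memset lowering above: the bulk of the data is
// copied with a repeat-string operation on a DWORD granule (or, on x86-64
// with 8-byte alignment, a QWORD granule).  Copies that are misaligned,
// non-constant in size, or larger than the subtarget threshold are left to
// the generic lowering or the library call.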
|
Revamp build_vector lowering to take advantage of movss and movd instructions.
movd always clear the top 96 bits and movss does so when it's loading the
value from memory.
The net result is codegen for 4-wide shuffles is much improved. It is near
optimal if one or more elements is a zero. e.g.
__m128i test(int a, int b) {
return _mm_set_epi32(0, 0, b, a);
}
compiles to
_test:
movd 8(%esp), %xmm1
movd 4(%esp), %xmm0
punpckldq %xmm1, %xmm0
ret
compare to gcc:
_test:
subl $12, %esp
movd 20(%esp), %xmm0
movd 16(%esp), %xmm1
punpckldq %xmm0, %xmm1
movq %xmm1, %xmm0
movhps LC0, %xmm0
addl $12, %esp
ret
or icc:
_test:
movd 4(%esp), %xmm0 #5.10
movd 8(%esp), %xmm3 #5.10
xorl %eax, %eax #5.10
movd %eax, %xmm1 #5.10
punpckldq %xmm1, %xmm0 #5.10
movd %eax, %xmm2 #5.10
punpckldq %xmm2, %xmm3 #5.10
punpckldq %xmm3, %xmm0 #5.10
ret #5.10
There are still room for improvement, for example the FP variant of the above example:
__m128 test(float a, float b) {
return _mm_set_ps(0.0, 0.0, b, a);
}
_test:
movss 8(%esp), %xmm1
movss 4(%esp), %xmm0
unpcklps %xmm1, %xmm0
xorps %xmm1, %xmm1
movlhps %xmm1, %xmm0
ret
The xorps and movlhps are unnecessary. This will require post legalizer optimization to handle.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27939 91177308-0d34-0410-b5e6-96231b3b80d8
2006-04-21 23:03:30 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue
|
Drop ISD::MEMSET, ISD::MEMMOVE, and ISD::MEMCPY, which are not Legal
on any current target and aren't optimized in DAGCombiner. Instead
of using intermediate nodes, expand the operations, choosing between
simple loads/stores, target-specific code, and library calls,
immediately.
Previously, the code to emit optimized code for these operations
was only used at initial SelectionDAG construction time; now it is
used at all times. This fixes some cases where rep;movs was being
used for small copies where simple loads/stores would be better.
This also cleans up code that checks for alignments less than 4;
let the targets make that decision instead of doing it in
target-independent code. This allows x86 to use rep;movs in
low-alignment cases.
Also, this fixes a bug that resulted in the use of rep;stos for
memsets of 0 with non-constant memory size when the alignment was
at least 4. It's better to use the library in this case, which
can be significantly faster when the size is large.
This also preserves more SourceValue information when memory
intrinsics are lowered into simple loads/stores.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49572 91177308-0d34-0410-b5e6-96231b3b80d8
2008-04-12 04:36:06 +00:00
|
|
|
X86TargetLowering::EmitTargetCodeForMemcpy(SelectionDAG &DAG,
|
2008-08-21 21:00:15 +00:00
|
|
|
SDValue Chain, SDValue Dst, SDValue Src,
|
|
|
|
SDValue Size, unsigned Align,
|
|
|
|
bool AlwaysInline,
|
|
|
|
const Value *DstSV, uint64_t DstSVOff,
|
|
|
|
const Value *SrcSV, uint64_t SrcSVOff) {
|
Drop ISD::MEMSET, ISD::MEMMOVE, and ISD::MEMCPY, which are not Legal
on any current target and aren't optimized in DAGCombiner. Instead
of using intermediate nodes, expand the operations, choosing between
simple loads/stores, target-specific code, and library calls,
immediately.
Previously, the code to emit optimized code for these operations
was only used at initial SelectionDAG construction time; now it is
used at all times. This fixes some cases where rep;movs was being
used for small copies where simple loads/stores would be better.
This also cleans up code that checks for alignments less than 4;
let the targets make that decision instead of doing it in
target-independent code. This allows x86 to use rep;movs in
low-alignment cases.
Also, this fixes a bug that resulted in the use of rep;stos for
memsets of 0 with non-constant memory size when the alignment was
at least 4. It's better to use the library in this case, which
can be significantly faster when the size is large.
This also preserves more SourceValue information when memory
intrinsics are lowered into simple loads/stores.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49572 91177308-0d34-0410-b5e6-96231b3b80d8
2008-04-12 04:36:06 +00:00
|
|
|
// This requires the copy size to be a constant, preferrably
|
|
|
|
// within a subtarget-specific limit.
|
|
|
|
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
|
|
|
|
if (!ConstantSize)
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-09-12 16:56:44 +00:00
|
|
|
uint64_t SizeVal = ConstantSize->getZExtValue();
|
Drop ISD::MEMSET, ISD::MEMMOVE, and ISD::MEMCPY, which are not Legal
on any current target and aren't optimized in DAGCombiner. Instead
of using intermediate nodes, expand the operations, choosing between
simple loads/stores, target-specific code, and library calls,
immediately.
Previously, the code to emit optimized code for these operations
was only used at initial SelectionDAG construction time; now it is
used at all times. This fixes some cases where rep;movs was being
used for small copies where simple loads/stores would be better.
This also cleans up code that checks for alignments less than 4;
let the targets make that decision instead of doing it in
target-independent code. This allows x86 to use rep;movs in
low-alignment cases.
Also, this fixes a bug that resulted in the use of rep;stos for
memsets of 0 with non-constant memory size when the alignment was
at least 4. It's better to use the library in this case, which
can be significantly faster when the size is large.
This also preserves more SourceValue information when memory
intrinsics are lowered into simple loads/stores.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49572 91177308-0d34-0410-b5e6-96231b3b80d8
2008-04-12 04:36:06 +00:00
|
|
|
if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
Drop ISD::MEMSET, ISD::MEMMOVE, and ISD::MEMCPY, which are not Legal
on any current target and aren't optimized in DAGCombiner. Instead
of using intermediate nodes, expand the operations, choosing between
simple loads/stores, target-specific code, and library calls,
immediately.
Previously, the code to emit optimized code for these operations
was only used at initial SelectionDAG construction time; now it is
used at all times. This fixes some cases where rep;movs was being
used for small copies where simple loads/stores would be better.
This also cleans up code that checks for alignments less than 4;
let the targets make that decision instead of doing it in
target-independent code. This allows x86 to use rep;movs in
low-alignment cases.
Also, this fixes a bug that resulted in the use of rep;stos for
memsets of 0 with non-constant memory size when the alignment was
at least 4. It's better to use the library in this case, which
can be significantly faster when the size is large.
This also preserves more SourceValue information when memory
intrinsics are lowered into simple loads/stores.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49572 91177308-0d34-0410-b5e6-96231b3b80d8
2008-04-12 04:36:06 +00:00
|
|
|
|
2008-08-21 21:00:15 +00:00
|
|
|
/// If not DWORD aligned, call the library.
|
|
|
|
if ((Align & 3) != 0)
|
|
|
|
return SDValue();
|
|
|
|
|
|
|
|
// DWORD aligned
|
|
|
|
MVT AVT = MVT::i32;
|
|
|
|
if (Subtarget->is64Bit() && ((Align & 0x7) == 0)) // QWORD aligned
|
2008-04-12 04:36:06 +00:00
|
|
|
AVT = MVT::i64;
|
Revamp build_vector lowering to take advantage of movss and movd instructions.
movd always clear the top 96 bits and movss does so when it's loading the
value from memory.
The net result is codegen for 4-wide shuffles is much improved. It is near
optimal if one or more elements is a zero. e.g.
__m128i test(int a, int b) {
return _mm_set_epi32(0, 0, b, a);
}
compiles to
_test:
movd 8(%esp), %xmm1
movd 4(%esp), %xmm0
punpckldq %xmm1, %xmm0
ret
compare to gcc:
_test:
subl $12, %esp
movd 20(%esp), %xmm0
movd 16(%esp), %xmm1
punpckldq %xmm0, %xmm1
movq %xmm1, %xmm0
movhps LC0, %xmm0
addl $12, %esp
ret
or icc:
_test:
movd 4(%esp), %xmm0 #5.10
movd 8(%esp), %xmm3 #5.10
xorl %eax, %eax #5.10
movd %eax, %xmm1 #5.10
punpckldq %xmm1, %xmm0 #5.10
movd %eax, %xmm2 #5.10
punpckldq %xmm2, %xmm3 #5.10
punpckldq %xmm3, %xmm0 #5.10
ret #5.10
There are still room for improvement, for example the FP variant of the above example:
__m128 test(float a, float b) {
return _mm_set_ps(0.0, 0.0, b, a);
}
_test:
movss 8(%esp), %xmm1
movss 4(%esp), %xmm0
unpcklps %xmm1, %xmm0
xorps %xmm1, %xmm1
movlhps %xmm1, %xmm0
ret
The xorps and movlhps are unnecessary. This will require post legalizer optimization to handle.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27939 91177308-0d34-0410-b5e6-96231b3b80d8
2006-04-21 23:03:30 +00:00
|
|
|
|
2008-06-06 12:08:01 +00:00
|
|
|
unsigned UBytes = AVT.getSizeInBits() / 8;
|
2008-04-12 04:36:06 +00:00
|
|
|
unsigned CountVal = SizeVal / UBytes;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Count = DAG.getIntPtrConstant(CountVal);
|
2008-08-21 21:00:15 +00:00
|
|
|
unsigned BytesLeft = SizeVal % UBytes;
|
2006-09-08 06:48:29 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue InFlag(0, 0);
|
2006-09-08 06:48:29 +00:00
|
|
|
Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RCX : X86::ECX,
|
|
|
|
Count, InFlag);
|
2006-04-25 20:13:52 +00:00
|
|
|
InFlag = Chain.getValue(1);
|
2006-09-08 06:48:29 +00:00
|
|
|
Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RDI : X86::EDI,
|
2008-04-12 04:36:06 +00:00
|
|
|
Dst, InFlag);
|
2006-04-25 20:13:52 +00:00
|
|
|
InFlag = Chain.getValue(1);
|
2006-09-08 06:48:29 +00:00
|
|
|
Chain = DAG.getCopyToReg(Chain, Subtarget->is64Bit() ? X86::RSI : X86::ESI,
|
2008-04-12 04:36:06 +00:00
|
|
|
Src, InFlag);
|
2006-04-25 20:13:52 +00:00
|
|
|
InFlag = Chain.getValue(1);
|
Now generating perfect (I think) code for "vector set" with a single non-zero
scalar value.
e.g.
_mm_set_epi32(0, a, 0, 0);
==>
movd 4(%esp), %xmm0
pshufd $69, %xmm0, %xmm0
_mm_set_epi8(0, 0, 0, 0, 0, a, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
==>
movzbw 4(%esp), %ax
movzwl %ax, %eax
pxor %xmm0, %xmm0
pinsrw $5, %eax, %xmm0
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@27923 91177308-0d34-0410-b5e6-96231b3b80d8
2006-04-21 01:05:10 +00:00
|
|
|
|
2007-02-25 06:40:16 +00:00
|
|
|
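// Emit a single rep;movs of AVT-sized elements; any remaining
// SizeVal % UBytes tail bytes are copied with a separate memcpy below.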
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> Ops;
|
2006-04-25 20:13:52 +00:00
|
|
|
Ops.push_back(Chain);
|
|
|
|
Ops.push_back(DAG.getValueType(AVT));
|
|
|
|
Ops.push_back(InFlag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue RepMovs = DAG.getNode(X86ISD::REP_MOVS, Tys, &Ops[0], Ops.size());
|
2006-03-24 07:29:27 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 4> Results;
|
2008-04-25 00:26:43 +00:00
|
|
|
Results.push_back(RepMovs);
|
2007-09-28 12:53:01 +00:00
|
|
|
if (BytesLeft) {
|
2008-04-12 04:36:06 +00:00
|
|
|
// Handle the last 1 - 7 bytes.
|
|
|
|
unsigned Offset = SizeVal - BytesLeft;
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT DstVT = Dst.getValueType();
|
|
|
|
MVT SrcVT = Src.getValueType();
|
|
|
|
MVT SizeVT = Size.getValueType();
|
2008-04-25 00:26:43 +00:00
|
|
|
Results.push_back(DAG.getMemcpy(Chain,
|
2008-04-12 04:36:06 +00:00
|
|
|
DAG.getNode(ISD::ADD, DstVT, Dst,
|
2008-04-25 00:26:43 +00:00
|
|
|
DAG.getConstant(Offset, DstVT)),
|
2008-04-12 04:36:06 +00:00
|
|
|
DAG.getNode(ISD::ADD, SrcVT, Src,
|
2008-04-25 00:26:43 +00:00
|
|
|
DAG.getConstant(Offset, SrcVT)),
|
2008-04-12 04:36:06 +00:00
|
|
|
DAG.getConstant(BytesLeft, SizeVT),
|
|
|
|
Align, AlwaysInline,
|
2008-04-28 17:15:20 +00:00
|
|
|
DstSV, DstSVOff + Offset,
|
|
|
|
SrcSV, SrcSVOff + Offset));
|
2008-04-12 04:36:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return DAG.getNode(ISD::TokenFactor, MVT::Other, &Results[0], Results.size());
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
2006-03-31 19:22:53 +00:00
|
|
|
|
2007-11-24 07:07:01 +00:00
|
|
|
/// Expand the result of: i64,outchain = READCYCLECOUNTER inchain
|
|
|
|
SDNode *X86TargetLowering::ExpandREADCYCLECOUNTER(SDNode *N, SelectionDAG &DAG) {
|
2007-02-25 06:40:16 +00:00
|
|
|
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue TheChain = N->getOperand(0);
|
|
|
|
SDValue rd = DAG.getNode(X86ISD::RDTSC_DAG, Tys, &TheChain, 1);
|
2006-11-29 08:28:13 +00:00
|
|
|
if (Subtarget->is64Bit()) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue rax = DAG.getCopyFromReg(rd, X86::RAX, MVT::i64, rd.getValue(1));
|
|
|
|
SDValue rdx = DAG.getCopyFromReg(rax.getValue(1), X86::RDX,
|
2007-11-24 07:07:01 +00:00
|
|
|
MVT::i64, rax.getValue(2));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Tmp = DAG.getNode(ISD::SHL, MVT::i64, rdx,
|
2006-11-29 08:28:13 +00:00
|
|
|
DAG.getConstant(32, MVT::i8));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] = {
|
2007-11-24 07:07:01 +00:00
|
|
|
DAG.getNode(ISD::OR, MVT::i64, rax, Tmp), rdx.getValue(1)
|
2007-02-25 07:10:00 +00:00
|
|
|
};
|
2007-02-25 06:40:16 +00:00
|
|
|
|
2008-08-28 21:40:38 +00:00
|
|
|
return DAG.getMergeValues(Ops, 2).getNode();
|
2007-02-25 07:10:00 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue eax = DAG.getCopyFromReg(rd, X86::EAX, MVT::i32, rd.getValue(1));
|
|
|
|
SDValue edx = DAG.getCopyFromReg(eax.getValue(1), X86::EDX,
|
2007-11-24 07:07:01 +00:00
|
|
|
MVT::i32, eax.getValue(2));
|
|
|
|
// Use a buildpair to merge the two 32-bit values into a 64-bit one.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] = { eax, edx };
|
2007-11-24 07:07:01 +00:00
|
|
|
Ops[0] = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Ops, 2);
|
|
|
|
|
|
|
|
// Use a MERGE_VALUES to return the value and chain.
|
|
|
|
Ops[1] = edx.getValue(1);
|
2008-08-28 21:40:38 +00:00
|
|
|
return DAG.getMergeValues(Ops, 2).getNode();
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
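/// LowerVASTART - Lower a VASTART node. On x86-32 this just stores the
/// address of the VarArgsFrameIndex slot into the va_list; on x86-64 it
/// initializes all four fields of the __va_list_tag struct: gp_offset,
/// fp_offset, overflow_arg_area and reg_save_area.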
SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) {
|
2008-02-06 22:27:42 +00:00
|
|
|
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
|
2006-10-13 21:14:26 +00:00
|
|
|
|
2006-09-08 06:48:29 +00:00
|
|
|
if (!Subtarget->is64Bit()) {
|
|
|
|
// vastart just stores the address of the VarArgsFrameIndex slot into the
|
|
|
|
// memory location argument.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
|
2008-02-06 22:27:42 +00:00
|
|
|
return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV, 0);
|
2006-09-08 06:48:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// __va_list_tag:
|
|
|
|
// gp_offset (0 - 6 * 8)
|
|
|
|
// fp_offset (48 - 48 + 8 * 16)
|
|
|
|
// overflow_arg_area (points to parameters passed in memory).
|
|
|
|
// reg_save_area
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 8> MemOps;
|
|
|
|
SDValue FIN = Op.getOperand(1);
|
2006-09-08 06:48:29 +00:00
|
|
|
// Store gp_offset
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Store = DAG.getStore(Op.getOperand(0),
|
2006-10-05 23:01:46 +00:00
|
|
|
DAG.getConstant(VarArgsGPOffset, MVT::i32),
|
2008-02-06 22:27:42 +00:00
|
|
|
FIN, SV, 0);
|
2006-09-08 06:48:29 +00:00
|
|
|
MemOps.push_back(Store);
|
|
|
|
|
|
|
|
// Store fp_offset
|
2008-01-17 07:00:52 +00:00
|
|
|
FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4));
|
2006-10-05 23:01:46 +00:00
|
|
|
Store = DAG.getStore(Op.getOperand(0),
|
|
|
|
DAG.getConstant(VarArgsFPOffset, MVT::i32),
|
2008-02-06 22:27:42 +00:00
|
|
|
FIN, SV, 0);
|
2006-09-08 06:48:29 +00:00
|
|
|
MemOps.push_back(Store);
|
|
|
|
|
|
|
|
// Store ptr to overflow_arg_area
|
2008-01-17 07:00:52 +00:00
|
|
|
FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(4));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue OVFIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
|
2008-02-06 22:27:42 +00:00
|
|
|
Store = DAG.getStore(Op.getOperand(0), OVFIN, FIN, SV, 0);
|
2006-09-08 06:48:29 +00:00
|
|
|
MemOps.push_back(Store);
|
|
|
|
|
|
|
|
// Store ptr to reg_save_area.
|
2008-01-17 07:00:52 +00:00
|
|
|
FIN = DAG.getNode(ISD::ADD, getPointerTy(), FIN, DAG.getIntPtrConstant(8));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue RSFIN = DAG.getFrameIndex(RegSaveFrameIndex, getPointerTy());
|
2008-02-06 22:27:42 +00:00
|
|
|
Store = DAG.getStore(Op.getOperand(0), RSFIN, FIN, SV, 0);
|
2006-09-08 06:48:29 +00:00
|
|
|
MemOps.push_back(Store);
|
|
|
|
return DAG.getNode(ISD::TokenFactor, MVT::Other, &MemOps[0], MemOps.size());
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) {
|
2008-05-10 01:26:14 +00:00
|
|
|
// X86-64 va_list is a struct { i32, i32, i8*, i8* }.
|
|
|
|
assert(Subtarget->is64Bit() && "This code only handles 64-bit va_arg!");
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Chain = Op.getOperand(0);
|
|
|
|
SDValue SrcPtr = Op.getOperand(1);
|
|
|
|
SDValue SrcSV = Op.getOperand(2);
|
2008-05-10 01:26:14 +00:00
|
|
|
|
|
|
|
assert(0 && "VAArgInst is not yet implemented for x86-64!");
|
|
|
|
abort();
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-05-10 01:26:14 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
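/// LowerVACOPY - Lower a VACOPY node. The x86-64 va_list is a 24-byte
/// struct { i32, i32, i8*, i8* }, so va_copy is simply a fixed-size memcpy
/// of the whole struct.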
SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) {
|
2007-03-02 23:16:35 +00:00
|
|
|
// X86-64 va_list is a struct { i32, i32, i8*, i8* }.
|
2008-04-18 20:55:41 +00:00
|
|
|
assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Chain = Op.getOperand(0);
|
|
|
|
SDValue DstPtr = Op.getOperand(1);
|
|
|
|
SDValue SrcPtr = Op.getOperand(2);
|
2008-02-06 22:27:42 +00:00
|
|
|
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
|
|
|
|
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
|
2007-03-02 23:16:35 +00:00
|
|
|
|
2008-04-18 20:55:41 +00:00
|
|
|
return DAG.getMemcpy(Chain, DstPtr, SrcPtr,
|
|
|
|
DAG.getIntPtrConstant(24), 8, false,
|
|
|
|
DstSV, 0, SrcSV, 0);
|
2007-03-02 23:16:35 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
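/// LowerINTRINSIC_WO_CHAIN - Custom lower chainless intrinsics. The scalar
/// SSE/SSE2 comparison intrinsics are turned into COMI/UCOMI nodes followed
/// by a SETCC, and vector shift-by-immediate intrinsics whose shift amount
/// is not actually an immediate are rewritten to the register-shift forms.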
SDValue
|
|
|
|
X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) {
|
2008-09-12 16:56:44 +00:00
|
|
|
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
|
2006-04-25 20:13:52 +00:00
|
|
|
switch (IntNo) {
|
2008-07-27 21:46:04 +00:00
|
|
|
default: return SDValue(); // Don't custom lower most intrinsics.
|
2008-05-04 09:15:50 +00:00
|
|
|
// Comparison intrinsics.
|
2006-04-25 20:13:52 +00:00
|
|
|
case Intrinsic::x86_sse_comieq_ss:
|
|
|
|
case Intrinsic::x86_sse_comilt_ss:
|
|
|
|
case Intrinsic::x86_sse_comile_ss:
|
|
|
|
case Intrinsic::x86_sse_comigt_ss:
|
|
|
|
case Intrinsic::x86_sse_comige_ss:
|
|
|
|
case Intrinsic::x86_sse_comineq_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomieq_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomilt_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomile_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomigt_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomige_ss:
|
|
|
|
case Intrinsic::x86_sse_ucomineq_ss:
|
|
|
|
case Intrinsic::x86_sse2_comieq_sd:
|
|
|
|
case Intrinsic::x86_sse2_comilt_sd:
|
|
|
|
case Intrinsic::x86_sse2_comile_sd:
|
|
|
|
case Intrinsic::x86_sse2_comigt_sd:
|
|
|
|
case Intrinsic::x86_sse2_comige_sd:
|
|
|
|
case Intrinsic::x86_sse2_comineq_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomieq_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomilt_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomile_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomigt_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomige_sd:
|
|
|
|
case Intrinsic::x86_sse2_ucomineq_sd: {
|
|
|
|
unsigned Opc = 0;
|
|
|
|
ISD::CondCode CC = ISD::SETCC_INVALID;
|
|
|
|
switch (IntNo) {
|
|
|
|
default: break;
|
2006-11-21 00:01:06 +00:00
|
|
|
case Intrinsic::x86_sse_comieq_ss:
|
|
|
|
case Intrinsic::x86_sse2_comieq_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::COMI;
|
|
|
|
CC = ISD::SETEQ;
|
|
|
|
break;
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse_comilt_ss:
|
|
|
|
case Intrinsic::x86_sse2_comilt_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::COMI;
|
|
|
|
CC = ISD::SETLT;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_comile_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_comile_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::COMI;
|
|
|
|
CC = ISD::SETLE;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_comigt_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_comigt_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::COMI;
|
|
|
|
CC = ISD::SETGT;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_comige_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_comige_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::COMI;
|
|
|
|
CC = ISD::SETGE;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_comineq_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_comineq_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::COMI;
|
|
|
|
CC = ISD::SETNE;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_ucomieq_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_ucomieq_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::UCOMI;
|
|
|
|
CC = ISD::SETEQ;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_ucomilt_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_ucomilt_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::UCOMI;
|
|
|
|
CC = ISD::SETLT;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_ucomile_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_ucomile_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::UCOMI;
|
|
|
|
CC = ISD::SETLE;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_ucomigt_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_ucomigt_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::UCOMI;
|
|
|
|
CC = ISD::SETGT;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_ucomige_ss:
|
2006-04-05 23:38:46 +00:00
|
|
|
case Intrinsic::x86_sse2_ucomige_sd:
|
2006-04-25 20:13:52 +00:00
|
|
|
Opc = X86ISD::UCOMI;
|
|
|
|
CC = ISD::SETGE;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse_ucomineq_ss:
|
|
|
|
case Intrinsic::x86_sse2_ucomineq_sd:
|
|
|
|
Opc = X86ISD::UCOMI;
|
|
|
|
CC = ISD::SETNE;
|
|
|
|
break;
|
2006-04-05 23:38:46 +00:00
|
|
|
}
|
2006-09-11 02:19:56 +00:00
|
|
|
|
2006-04-25 20:13:52 +00:00
|
|
|
unsigned X86CC;
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue LHS = Op.getOperand(1);
|
|
|
|
SDValue RHS = Op.getOperand(2);
|
2006-09-13 03:22:10 +00:00
|
|
|
translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
|
2006-09-11 02:19:56 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS);
|
|
|
|
SDValue SetCC = DAG.getNode(X86ISD::SETCC, MVT::i8,
|
2008-08-17 19:22:34 +00:00
|
|
|
DAG.getConstant(X86CC, MVT::i8), Cond);
|
|
|
|
return DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, SetCC);
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
2008-05-04 09:15:50 +00:00
|
|
|
|
|
|
|
// Fix vector shift instructions where the last operand is a non-immediate
|
|
|
|
// i32 value.
|
|
|
|
case Intrinsic::x86_sse2_pslli_w:
|
|
|
|
case Intrinsic::x86_sse2_pslli_d:
|
|
|
|
case Intrinsic::x86_sse2_pslli_q:
|
|
|
|
case Intrinsic::x86_sse2_psrli_w:
|
|
|
|
case Intrinsic::x86_sse2_psrli_d:
|
|
|
|
case Intrinsic::x86_sse2_psrli_q:
|
|
|
|
case Intrinsic::x86_sse2_psrai_w:
|
|
|
|
case Intrinsic::x86_sse2_psrai_d:
|
|
|
|
case Intrinsic::x86_mmx_pslli_w:
|
|
|
|
case Intrinsic::x86_mmx_pslli_d:
|
|
|
|
case Intrinsic::x86_mmx_pslli_q:
|
|
|
|
case Intrinsic::x86_mmx_psrli_w:
|
|
|
|
case Intrinsic::x86_mmx_psrli_d:
|
|
|
|
case Intrinsic::x86_mmx_psrli_q:
|
|
|
|
case Intrinsic::x86_mmx_psrai_w:
|
|
|
|
case Intrinsic::x86_mmx_psrai_d: {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue ShAmt = Op.getOperand(2);
|
2008-05-04 09:15:50 +00:00
|
|
|
if (isa<ConstantSDNode>(ShAmt))
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2008-05-04 09:15:50 +00:00
|
|
|
|
|
|
|
unsigned NewIntNo = 0;
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT ShAmtVT = MVT::v4i32;
|
2008-05-04 09:15:50 +00:00
|
|
|
switch (IntNo) {
|
|
|
|
case Intrinsic::x86_sse2_pslli_w:
|
|
|
|
NewIntNo = Intrinsic::x86_sse2_psll_w;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse2_pslli_d:
|
|
|
|
NewIntNo = Intrinsic::x86_sse2_psll_d;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse2_pslli_q:
|
|
|
|
NewIntNo = Intrinsic::x86_sse2_psll_q;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse2_psrli_w:
|
|
|
|
NewIntNo = Intrinsic::x86_sse2_psrl_w;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse2_psrli_d:
|
|
|
|
NewIntNo = Intrinsic::x86_sse2_psrl_d;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse2_psrli_q:
|
|
|
|
NewIntNo = Intrinsic::x86_sse2_psrl_q;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse2_psrai_w:
|
|
|
|
NewIntNo = Intrinsic::x86_sse2_psra_w;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_sse2_psrai_d:
|
|
|
|
NewIntNo = Intrinsic::x86_sse2_psra_d;
|
|
|
|
break;
|
|
|
|
default: {
|
|
|
|
ShAmtVT = MVT::v2i32;
|
|
|
|
switch (IntNo) {
|
|
|
|
case Intrinsic::x86_mmx_pslli_w:
|
|
|
|
NewIntNo = Intrinsic::x86_mmx_psll_w;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_mmx_pslli_d:
|
|
|
|
NewIntNo = Intrinsic::x86_mmx_psll_d;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_mmx_pslli_q:
|
|
|
|
NewIntNo = Intrinsic::x86_mmx_psll_q;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_mmx_psrli_w:
|
|
|
|
NewIntNo = Intrinsic::x86_mmx_psrl_w;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_mmx_psrli_d:
|
|
|
|
NewIntNo = Intrinsic::x86_mmx_psrl_d;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_mmx_psrli_q:
|
|
|
|
NewIntNo = Intrinsic::x86_mmx_psrl_q;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_mmx_psrai_w:
|
|
|
|
NewIntNo = Intrinsic::x86_mmx_psra_w;
|
|
|
|
break;
|
|
|
|
case Intrinsic::x86_mmx_psrai_d:
|
|
|
|
NewIntNo = Intrinsic::x86_mmx_psra_d;
|
|
|
|
break;
|
|
|
|
default: abort(); // Can't reach here.
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
2008-05-04 09:15:50 +00:00
|
|
|
ShAmt = DAG.getNode(ISD::BIT_CONVERT, VT,
|
|
|
|
DAG.getNode(ISD::SCALAR_TO_VECTOR, ShAmtVT, ShAmt));
|
|
|
|
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VT,
|
|
|
|
DAG.getConstant(NewIntNo, MVT::i32),
|
|
|
|
Op.getOperand(1), ShAmt);
|
|
|
|
}
|
2006-04-05 23:38:46 +00:00
|
|
|
}
|
2006-04-25 20:13:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) {
|
2007-01-29 22:58:52 +00:00
|
|
|
// Depths > 0 not supported yet!
|
2008-09-12 16:56:44 +00:00
|
|
|
if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2007-01-29 22:58:52 +00:00
|
|
|
|
|
|
|
// Just load the return address
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
|
2007-01-29 22:58:52 +00:00
|
|
|
return DAG.getLoad(getPointerTy(), DAG.getEntryNode(), RetAddrFI, NULL, 0);
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
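/// LowerFRAMEADDR - Return the frame pointer (RBP/EBP), chasing one saved
/// frame pointer per level of requested depth.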
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
|
2008-09-27 01:56:22 +00:00
|
|
|
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
|
|
|
|
MFI->setFrameAddressIsTaken(true);
|
|
|
|
MVT VT = Op.getValueType();
|
|
|
|
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
|
|
|
|
unsigned FrameReg = Subtarget->is64Bit() ? X86::RBP : X86::EBP;
|
|
|
|
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), FrameReg, VT);
|
|
|
|
while (Depth--)
|
|
|
|
FrameAddr = DAG.getLoad(VT, DAG.getEntryNode(), FrameAddr, NULL, 0);
|
|
|
|
return FrameAddr;
|
2007-01-29 22:58:52 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
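/// LowerFRAME_TO_ARGS_OFFSET - The incoming arguments begin two pointers
/// above the frame pointer: the saved frame pointer plus the return address.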
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
|
2008-09-08 21:12:11 +00:00
|
|
|
SelectionDAG &DAG) {
|
2008-09-09 18:22:57 +00:00
|
|
|
return DAG.getIntPtrConstant(2*TD->getPointerSize());
|
2007-07-14 14:06:15 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
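/// LowerEH_RETURN - Store the exception handler address at
/// FramePtr - PointerSize + Offset, pass that address to the epilogue in
/// RCX/ECX, and emit an X86ISD::EH_RETURN node.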
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
|
2007-07-14 14:06:15 +00:00
|
|
|
{
|
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Chain = Op.getOperand(0);
|
|
|
|
SDValue Offset = Op.getOperand(1);
|
|
|
|
SDValue Handler = Op.getOperand(2);
|
2007-07-14 14:06:15 +00:00
|
|
|
|
2008-09-08 21:12:47 +00:00
|
|
|
SDValue Frame = DAG.getRegister(Subtarget->is64Bit() ? X86::RBP : X86::EBP,
|
|
|
|
getPointerTy());
|
|
|
|
unsigned StoreAddrReg = (Subtarget->is64Bit() ? X86::RCX : X86::ECX);
|
2007-07-14 14:06:15 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue StoreAddr = DAG.getNode(ISD::SUB, getPointerTy(), Frame,
|
2008-09-09 18:22:57 +00:00
|
|
|
DAG.getIntPtrConstant(-TD->getPointerSize()));
|
2007-07-14 14:06:15 +00:00
|
|
|
StoreAddr = DAG.getNode(ISD::ADD, getPointerTy(), StoreAddr, Offset);
|
|
|
|
Chain = DAG.getStore(Chain, Handler, StoreAddr, NULL, 0);
|
2008-09-08 21:12:47 +00:00
|
|
|
Chain = DAG.getCopyToReg(Chain, StoreAddrReg, StoreAddr);
|
|
|
|
MF.getRegInfo().addLiveOut(StoreAddrReg);
|
2007-07-14 14:06:15 +00:00
|
|
|
|
2008-09-08 21:12:47 +00:00
|
|
|
return DAG.getNode(X86ISD::EH_RETURN,
|
|
|
|
MVT::Other,
|
|
|
|
Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
|
2007-07-14 14:06:15 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
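/// LowerTRAMPOLINE - Write the machine code for a nested-function
/// trampoline into the trampoline buffer. On x86-64 this is a movabsq of
/// the function pointer into R11, a movabsq of the 'nest' value into R10,
/// and an indirect jmp through R11; on x86-32 it is a mov of 'nest' into
/// ECX or EAX (depending on the calling convention) followed by a relative
/// jmp to the nested function.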
SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op,
|
2007-07-27 20:02:49 +00:00
|
|
|
SelectionDAG &DAG) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Root = Op.getOperand(0);
|
|
|
|
SDValue Trmp = Op.getOperand(1); // trampoline
|
|
|
|
SDValue FPtr = Op.getOperand(2); // nested function
|
|
|
|
SDValue Nest = Op.getOperand(3); // 'nest' parameter value
|
2007-07-27 20:02:49 +00:00
|
|
|
|
2008-02-06 22:27:42 +00:00
|
|
|
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
|
2007-07-27 20:02:49 +00:00
|
|
|
|
2008-01-16 22:55:25 +00:00
|
|
|
const X86InstrInfo *TII =
|
|
|
|
((X86TargetMachine&)getTargetMachine()).getInstrInfo();
|
|
|
|
|
2007-07-27 20:02:49 +00:00
|
|
|
if (Subtarget->is64Bit()) {
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue OutChains[6];
|
2008-01-16 22:55:25 +00:00
|
|
|
|
|
|
|
// Large code-model.
|
|
|
|
|
|
|
|
const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r);
|
|
|
|
const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri);
|
|
|
|
|
2008-05-14 01:58:56 +00:00
|
|
|
const unsigned char N86R10 = RegInfo->getX86RegNum(X86::R10);
|
|
|
|
const unsigned char N86R11 = RegInfo->getX86RegNum(X86::R11);
|
2008-01-16 22:55:25 +00:00
|
|
|
|
|
|
|
const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
|
|
|
|
|
|
|
|
// Load the pointer to the nested function into R11.
|
|
|
|
unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Addr = Trmp;
|
2008-01-16 22:55:25 +00:00
|
|
|
OutChains[0] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
|
2008-02-06 22:27:42 +00:00
|
|
|
TrmpAddr, 0);
|
2008-01-16 22:55:25 +00:00
|
|
|
|
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(2, MVT::i64));
|
2008-02-06 22:27:42 +00:00
|
|
|
OutChains[1] = DAG.getStore(Root, FPtr, Addr, TrmpAddr, 2, false, 2);
|
2008-01-16 22:55:25 +00:00
|
|
|
|
|
|
|
// Load the 'nest' parameter value into R10.
|
|
|
|
// R10 is specified in X86CallingConv.td
|
|
|
|
OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
|
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(10, MVT::i64));
|
|
|
|
OutChains[2] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
|
2008-02-06 22:27:42 +00:00
|
|
|
TrmpAddr, 10);
|
2008-01-16 22:55:25 +00:00
|
|
|
|
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(12, MVT::i64));
|
2008-02-06 22:27:42 +00:00
|
|
|
OutChains[3] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 12, false, 2);
|
2008-01-16 22:55:25 +00:00
|
|
|
|
|
|
|
// Jump to the nested function.
|
|
|
|
OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
|
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(20, MVT::i64));
|
|
|
|
OutChains[4] = DAG.getStore(Root, DAG.getConstant(OpCode, MVT::i16), Addr,
|
2008-02-06 22:27:42 +00:00
|
|
|
TrmpAddr, 20);
|
2008-01-16 22:55:25 +00:00
|
|
|
|
|
|
|
unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
|
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i64, Trmp, DAG.getConstant(22, MVT::i64));
|
|
|
|
OutChains[5] = DAG.getStore(Root, DAG.getConstant(ModRM, MVT::i8), Addr,
|
2008-02-06 22:27:42 +00:00
|
|
|
TrmpAddr, 22);
|
2008-01-16 22:55:25 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] =
|
2008-01-16 22:55:25 +00:00
|
|
|
{ Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 6) };
|
2008-07-02 17:40:58 +00:00
|
|
|
return DAG.getMergeValues(Ops, 2);
|
2007-07-27 20:02:49 +00:00
|
|
|
} else {
|
2008-01-31 01:01:48 +00:00
|
|
|
const Function *Func =
|
2007-07-27 20:02:49 +00:00
|
|
|
cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
|
|
|
|
unsigned CC = Func->getCallingConv();
|
2007-08-29 19:01:20 +00:00
|
|
|
unsigned NestReg;
|
2007-07-27 20:02:49 +00:00
|
|
|
|
|
|
|
switch (CC) {
|
|
|
|
default:
|
|
|
|
assert(0 && "Unsupported calling convention");
|
|
|
|
case CallingConv::C:
|
|
|
|
case CallingConv::X86_StdCall: {
|
|
|
|
// Pass 'nest' parameter in ECX.
|
|
|
|
// Must be kept in sync with X86CallingConv.td
|
2007-08-29 19:01:20 +00:00
|
|
|
NestReg = X86::ECX;
|
2007-07-27 20:02:49 +00:00
|
|
|
|
|
|
|
// Check that ECX wasn't needed by an 'inreg' parameter.
|
|
|
|
const FunctionType *FTy = Func->getFunctionType();
|
2008-09-25 21:00:45 +00:00
|
|
|
const AttrListPtr &Attrs = Func->getAttributes();
|
2007-07-27 20:02:49 +00:00
|
|
|
|
2008-03-12 17:45:29 +00:00
|
|
|
if (!Attrs.isEmpty() && !Func->isVarArg()) {
|
2007-07-27 20:02:49 +00:00
|
|
|
unsigned InRegCount = 0;
|
|
|
|
unsigned Idx = 1;
|
|
|
|
|
|
|
|
for (FunctionType::param_iterator I = FTy->param_begin(),
|
|
|
|
E = FTy->param_end(); I != E; ++I, ++Idx)
|
2008-09-25 21:00:45 +00:00
|
|
|
if (Attrs.paramHasAttr(Idx, Attribute::InReg))
|
2007-07-27 20:02:49 +00:00
|
|
|
// FIXME: should only count parameters that are lowered to integers.
|
2008-09-09 18:22:57 +00:00
|
|
|
InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
|
2007-07-27 20:02:49 +00:00
|
|
|
|
|
|
|
if (InRegCount > 2) {
|
|
|
|
cerr << "Nest register in use - reduce number of inreg parameters!\n";
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case CallingConv::X86_FastCall:
|
2008-09-10 13:22:10 +00:00
|
|
|
case CallingConv::Fast:
|
2007-07-27 20:02:49 +00:00
|
|
|
// Pass 'nest' parameter in EAX.
|
|
|
|
// Must be kept in sync with X86CallingConv.td
|
2007-08-29 19:01:20 +00:00
|
|
|
NestReg = X86::EAX;
|
2007-07-27 20:02:49 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue OutChains[4];
|
|
|
|
SDValue Addr, Disp;
|
2007-07-27 20:02:49 +00:00
|
|
|
|
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
|
|
|
|
Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
|
|
|
|
|
2008-01-16 22:55:25 +00:00
|
|
|
const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
|
2008-05-14 01:58:56 +00:00
|
|
|
const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg);
|
2007-08-29 19:01:20 +00:00
|
|
|
OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
|
2008-02-06 22:27:42 +00:00
|
|
|
Trmp, TrmpAddr, 0);
|
2007-07-27 20:02:49 +00:00
|
|
|
|
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
|
2008-02-06 22:27:42 +00:00
|
|
|
OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpAddr, 1, false, 1);
|
2007-07-27 20:02:49 +00:00
|
|
|
|
2008-01-16 22:55:25 +00:00
|
|
|
const unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
|
2007-07-27 20:02:49 +00:00
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
|
|
|
|
OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
|
2008-02-06 22:27:42 +00:00
|
|
|
TrmpAddr, 5, false, 1);
|
2007-07-27 20:02:49 +00:00
|
|
|
|
|
|
|
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(6, MVT::i32));
|
2008-02-06 22:27:42 +00:00
|
|
|
OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpAddr, 6, false, 1);
|
2007-07-27 20:02:49 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] =
|
2007-09-11 14:10:23 +00:00
|
|
|
{ Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
|
2008-07-02 17:40:58 +00:00
|
|
|
return DAG.getMergeValues(Ops, 2);
|
2007-07-27 20:02:49 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) {
|
2007-11-16 01:31:51 +00:00
|
|
|
/*
|
|
|
|
The rounding mode is in bits 11:10 of FPSR, and has the following
|
|
|
|
settings:
|
|
|
|
00 Round to nearest
|
|
|
|
01 Round to -inf
|
|
|
|
10 Round to +inf
|
|
|
|
11 Round to 0
|
|
|
|
|
|
|
|
FLT_ROUNDS, on the other hand, expects the following:
|
|
|
|
-1 Undefined
|
|
|
|
0 Round to 0
|
|
|
|
1 Round to nearest
|
|
|
|
2 Round to +inf
|
|
|
|
3 Round to -inf
|
|
|
|
|
|
|
|
To perform the conversion, we do:
|
|
|
|
(((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
|
|
|
|
*/
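// For example, plugging the four rounding-control encodings into the
// expression above:
//   bits 11:10 = 00  ->  ((0|0)+1)&3 = 1   (round to nearest)
//   bits 11:10 = 01  ->  ((0|2)+1)&3 = 3   (round to -inf)
//   bits 11:10 = 10  ->  ((1|0)+1)&3 = 2   (round to +inf)
//   bits 11:10 = 11  ->  ((1|2)+1)&3 = 0   (round to 0)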
|
|
|
|
|
|
|
|
MachineFunction &MF = DAG.getMachineFunction();
|
|
|
|
const TargetMachine &TM = MF.getTarget();
|
|
|
|
const TargetFrameInfo &TFI = *TM.getFrameInfo();
|
|
|
|
unsigned StackAlignment = TFI.getStackAlignment();
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
2007-11-16 01:31:51 +00:00
|
|
|
|
|
|
|
// Save FP Control Word to stack slot
|
|
|
|
int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
|
2007-11-16 01:31:51 +00:00
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Chain = DAG.getNode(X86ISD::FNSTCW16m, MVT::Other,
|
2008-09-24 23:26:36 +00:00
|
|
|
DAG.getEntryNode(), StackSlot);
|
2007-11-16 01:31:51 +00:00
|
|
|
|
|
|
|
// Load FP Control Word from stack slot
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue CWD = DAG.getLoad(MVT::i16, Chain, StackSlot, NULL, 0);
|
2007-11-16 01:31:51 +00:00
|
|
|
|
|
|
|
// Transform as necessary
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue CWD1 =
|
2007-11-16 01:31:51 +00:00
|
|
|
DAG.getNode(ISD::SRL, MVT::i16,
|
|
|
|
DAG.getNode(ISD::AND, MVT::i16,
|
|
|
|
CWD, DAG.getConstant(0x800, MVT::i16)),
|
|
|
|
DAG.getConstant(11, MVT::i8));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue CWD2 =
|
2007-11-16 01:31:51 +00:00
|
|
|
DAG.getNode(ISD::SRL, MVT::i16,
|
|
|
|
DAG.getNode(ISD::AND, MVT::i16,
|
|
|
|
CWD, DAG.getConstant(0x400, MVT::i16)),
|
|
|
|
DAG.getConstant(9, MVT::i8));
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue RetVal =
|
2007-11-16 01:31:51 +00:00
|
|
|
DAG.getNode(ISD::AND, MVT::i16,
|
|
|
|
DAG.getNode(ISD::ADD, MVT::i16,
|
|
|
|
DAG.getNode(ISD::OR, MVT::i16, CWD1, CWD2),
|
|
|
|
DAG.getConstant(1, MVT::i16)),
|
|
|
|
DAG.getConstant(3, MVT::i16));
|
|
|
|
|
|
|
|
|
2008-06-06 12:08:01 +00:00
|
|
|
return DAG.getNode((VT.getSizeInBits() < 16 ?
|
2007-11-16 01:31:51 +00:00
|
|
|
ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
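/// LowerCTLZ - Lower ctlz with a BSR. bsr returns the index of the highest
/// set bit, so the leading-zero count is obtained by XOR'ing the result with
/// NumBits-1; a CMOV supplies the answer for a zero input.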
SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
|
|
|
MVT OpVT = VT;
|
|
|
|
unsigned NumBits = VT.getSizeInBits();
|
2007-12-14 02:13:44 +00:00
|
|
|
|
|
|
|
Op = Op.getOperand(0);
|
|
|
|
if (VT == MVT::i8) {
|
2007-12-14 08:30:15 +00:00
|
|
|
// Zero-extend to i32, since there is no i8 bsr instruction.
|
2007-12-14 02:13:44 +00:00
|
|
|
OpVT = MVT::i32;
|
|
|
|
Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
|
|
|
|
}
|
|
|
|
|
2007-12-14 08:30:15 +00:00
|
|
|
// Issue a bsr (scan bits in reverse) which also sets EFLAGS.
|
|
|
|
SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
|
|
|
|
Op = DAG.getNode(X86ISD::BSR, VTs, Op);
|
|
|
|
|
|
|
|
// If src is zero (i.e. bsr sets ZF), returns NumBits.
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 4> Ops;
|
2007-12-14 08:30:15 +00:00
|
|
|
Ops.push_back(Op);
|
|
|
|
Ops.push_back(DAG.getConstant(NumBits+NumBits-1, OpVT));
|
|
|
|
Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
|
|
|
|
Ops.push_back(Op.getValue(1));
|
|
|
|
Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);
|
|
|
|
|
|
|
|
// Finally xor with NumBits-1.
|
|
|
|
Op = DAG.getNode(ISD::XOR, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
|
|
|
|
|
2007-12-14 02:13:44 +00:00
|
|
|
if (VT == MVT::i8)
|
|
|
|
Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
|
|
|
|
return Op;
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
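/// LowerCTTZ - Lower cttz with a BSF, which directly yields the index of
/// the lowest set bit; a CMOV substitutes NumBits when the input is zero.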
SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
|
2008-06-06 12:08:01 +00:00
|
|
|
MVT VT = Op.getValueType();
|
|
|
|
MVT OpVT = VT;
|
|
|
|
unsigned NumBits = VT.getSizeInBits();
|
2007-12-14 02:13:44 +00:00
|
|
|
|
|
|
|
Op = Op.getOperand(0);
|
|
|
|
if (VT == MVT::i8) {
|
|
|
|
OpVT = MVT::i32;
|
|
|
|
Op = DAG.getNode(ISD::ZERO_EXTEND, OpVT, Op);
|
|
|
|
}
|
2007-12-14 08:30:15 +00:00
|
|
|
|
|
|
|
// Issue a bsf (scan bits forward) which also sets EFLAGS.
|
|
|
|
SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
|
|
|
|
Op = DAG.getNode(X86ISD::BSF, VTs, Op);
|
|
|
|
|
|
|
|
// If src is zero (i.e. bsf sets ZF), returns NumBits.
|
2008-07-27 21:46:04 +00:00
|
|
|
SmallVector<SDValue, 4> Ops;
|
2007-12-14 08:30:15 +00:00
|
|
|
Ops.push_back(Op);
|
|
|
|
Ops.push_back(DAG.getConstant(NumBits, OpVT));
|
|
|
|
Ops.push_back(DAG.getConstant(X86::COND_E, MVT::i8));
|
|
|
|
Ops.push_back(Op.getValue(1));
|
|
|
|
Op = DAG.getNode(X86ISD::CMOV, OpVT, &Ops[0], 4);
|
|
|
|
|
2007-12-14 02:13:44 +00:00
|
|
|
if (VT == MVT::i8)
|
|
|
|
Op = DAG.getNode(ISD::TRUNCATE, MVT::i8, Op);
|
|
|
|
return Op;
|
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
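/// LowerCMP_SWAP - Lower an atomic compare-and-swap to LCMPXCHG. The
/// expected value is moved into AL/AX/EAX/RAX, the cmpxchg node is emitted,
/// and the previous memory value is read back out of the same register.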
SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) {
|
2008-06-25 16:07:49 +00:00
|
|
|
MVT T = Op.getValueType();
|
2008-03-04 21:13:33 +00:00
|
|
|
unsigned Reg = 0;
|
|
|
|
unsigned size = 0;
|
2008-06-06 12:08:01 +00:00
|
|
|
switch(T.getSimpleVT()) {
|
|
|
|
default:
|
|
|
|
assert(false && "Invalid value type!");
|
2008-03-01 21:52:34 +00:00
|
|
|
case MVT::i8: Reg = X86::AL; size = 1; break;
|
|
|
|
case MVT::i16: Reg = X86::AX; size = 2; break;
|
|
|
|
case MVT::i32: Reg = X86::EAX; size = 4; break;
|
2008-03-05 01:15:49 +00:00
|
|
|
case MVT::i64:
|
|
|
|
if (Subtarget->is64Bit()) {
|
|
|
|
Reg = X86::RAX; size = 8;
|
2008-10-20 15:56:33 +00:00
|
|
|
} else // Should go away when LegalizeDAGTypes lands
|
2008-08-28 21:40:38 +00:00
|
|
|
return SDValue(ExpandATOMIC_CMP_SWAP(Op.getNode(), DAG), 0);
|
2008-03-05 01:15:49 +00:00
|
|
|
break;
|
2008-03-01 21:52:34 +00:00
|
|
|
};
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), Reg,
|
2008-09-11 03:12:59 +00:00
|
|
|
Op.getOperand(2), SDValue());
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] = { cpIn.getValue(0),
|
2008-09-24 23:26:36 +00:00
|
|
|
Op.getOperand(1),
|
|
|
|
Op.getOperand(3),
|
|
|
|
DAG.getTargetConstant(size, MVT::i8),
|
|
|
|
cpIn.getValue(1) };
|
2008-03-01 21:52:34 +00:00
|
|
|
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Result = DAG.getNode(X86ISD::LCMPXCHG_DAG, Tys, Ops, 5);
|
|
|
|
SDValue cpOut =
|
2008-03-01 21:52:34 +00:00
|
|
|
DAG.getCopyFromReg(Result.getValue(0), Reg, T, Result.getValue(1));
|
|
|
|
return cpOut;
|
|
|
|
}
|
|
|
|
|
2008-08-28 23:19:51 +00:00
|
|
|
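/// ExpandATOMIC_CMP_SWAP - Expand an i64 compare-and-swap on a 32-bit
/// target using LCMPXCHG8B: the expected value is placed in EDX:EAX, the
/// new value in ECX:EBX, and the result is reassembled from EDX:EAX with a
/// BUILD_PAIR.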
SDNode* X86TargetLowering::ExpandATOMIC_CMP_SWAP(SDNode* Op,
|
|
|
|
SelectionDAG &DAG) {
|
2008-06-25 16:07:49 +00:00
|
|
|
MVT T = Op->getValueType(0);
|
2008-06-25 08:15:39 +00:00
|
|
|
assert (T == MVT::i64 && "Only know how to expand i64 Cmp and Swap");
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue cpInL, cpInH;
|
2008-09-11 03:12:59 +00:00
|
|
|
cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2),
|
2008-03-05 01:15:49 +00:00
|
|
|
DAG.getConstant(0, MVT::i32));
|
2008-09-11 03:12:59 +00:00
|
|
|
cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(2),
|
2008-03-05 01:15:49 +00:00
|
|
|
DAG.getConstant(1, MVT::i32));
|
|
|
|
cpInL = DAG.getCopyToReg(Op->getOperand(0), X86::EAX,
|
2008-07-27 21:46:04 +00:00
|
|
|
cpInL, SDValue());
|
2008-03-05 01:15:49 +00:00
|
|
|
cpInH = DAG.getCopyToReg(cpInL.getValue(0), X86::EDX,
|
|
|
|
cpInH, cpInL.getValue(1));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue swapInL, swapInH;
|
2008-09-11 03:12:59 +00:00
|
|
|
swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
|
2008-03-05 01:15:49 +00:00
|
|
|
DAG.getConstant(0, MVT::i32));
|
2008-09-11 03:12:59 +00:00
|
|
|
swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
|
2008-03-05 01:15:49 +00:00
|
|
|
DAG.getConstant(1, MVT::i32));
|
|
|
|
swapInL = DAG.getCopyToReg(cpInH.getValue(0), X86::EBX,
|
|
|
|
swapInL, cpInH.getValue(1));
|
|
|
|
swapInH = DAG.getCopyToReg(swapInL.getValue(0), X86::ECX,
|
|
|
|
swapInH, swapInL.getValue(1));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Ops[] = { swapInH.getValue(0),
|
2008-09-24 23:26:36 +00:00
|
|
|
Op->getOperand(1),
|
|
|
|
swapInH.getValue(1) };
|
2008-03-05 01:15:49 +00:00
|
|
|
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue Result = DAG.getNode(X86ISD::LCMPXCHG8_DAG, Tys, Ops, 3);
|
|
|
|
SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), X86::EAX, MVT::i32,
|
2008-03-05 01:15:49 +00:00
|
|
|
Result.getValue(1));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), X86::EDX, MVT::i32,
|
2008-03-05 01:15:49 +00:00
|
|
|
cpOutL.getValue(2));
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
|
|
|
|
SDValue ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2);
|
|
|
|
SDValue Vals[2] = { ResultVal, cpOutH.getValue(1) };
|
2008-08-28 21:40:38 +00:00
|
|
|
return DAG.getMergeValues(Vals, 2).getNode();
|
2008-03-05 01:15:49 +00:00
|
|
|
}
|
|
|
|
|
2008-10-02 18:53:47 +00:00
|
|
|
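/// LowerATOMIC_BINARY_64 - Lower a 64-bit atomic read-modify-write on a
/// 32-bit target by splitting the operand into two i32 halves, emitting the
/// corresponding X86ISD::ATOM*64_DAG pseudo, and reassembling the result
/// with a BUILD_PAIR.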
SDValue X86TargetLowering::LowerATOMIC_BINARY_64(SDValue Op,
|
|
|
|
SelectionDAG &DAG,
|
|
|
|
unsigned NewOp) {
|
|
|
|
SDNode *Node = Op.getNode();
|
|
|
|
MVT T = Node->getValueType(0);
|
|
|
|
assert (T == MVT::i64 && "Only know how to expand i64 atomics");
|
|
|
|
|
|
|
|
SDValue Chain = Node->getOperand(0);
|
|
|
|
SDValue In1 = Node->getOperand(1);
|
2008-10-20 15:56:33 +00:00
|
|
|
SDValue In2L = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
|
|
|
|
Node->getOperand(2), DAG.getIntPtrConstant(0));
|
|
|
|
SDValue In2H = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32,
|
|
|
|
Node->getOperand(2), DAG.getIntPtrConstant(1));
|
2008-10-03 19:41:08 +00:00
|
|
|
// This is a generalized SDNode, not an AtomicSDNode, so it doesn't
|
|
|
|
// have a MemOperand. Pass the info through as a normal operand.
|
|
|
|
SDValue LSI = DAG.getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
|
|
|
|
SDValue Ops[] = { Chain, In1, In2L, In2H, LSI };
|
2008-10-02 18:53:47 +00:00
|
|
|
SDVTList Tys = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
|
2008-10-03 19:41:08 +00:00
|
|
|
SDValue Result = DAG.getNode(NewOp, Tys, Ops, 5);
|
2008-10-02 18:53:47 +00:00
|
|
|
SDValue OpsF[] = { Result.getValue(0), Result.getValue(1)};
|
|
|
|
SDValue ResultVal = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, OpsF, 2);
|
|
|
|
SDValue Vals[2] = { ResultVal, Result.getValue(2) };
|
|
|
|
return SDValue(DAG.getMergeValues(Vals, 2).getNode(), 0);
|
|
|
|
}
|
|
|
|
|
2008-09-29 22:25:26 +00:00
|
|
|
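/// LowerLOAD_SUB - Lower an atomic load-sub to an atomic load-add of the
/// negated operand.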
SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
|
|
|
|
SDNode *Node = Op.getNode();
|
|
|
|
MVT T = Node->getValueType(0);
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue negOp = DAG.getNode(ISD::SUB, T,
|
2008-09-29 22:25:26 +00:00
|
|
|
DAG.getConstant(0, T), Node->getOperand(2));
|
|
|
|
return DAG.getAtomic((Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_8 ?
|
|
|
|
ISD::ATOMIC_LOAD_ADD_8 :
|
|
|
|
Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_16 ?
|
|
|
|
ISD::ATOMIC_LOAD_ADD_16 :
|
|
|
|
Op.getOpcode()==ISD::ATOMIC_LOAD_SUB_32 ?
|
|
|
|
ISD::ATOMIC_LOAD_ADD_32 :
|
|
|
|
ISD::ATOMIC_LOAD_ADD_64),
|
|
|
|
Node->getOperand(0),
|
|
|
|
Node->getOperand(1), negOp,
|
|
|
|
cast<AtomicSDNode>(Node)->getSrcValue(),
|
|
|
|
cast<AtomicSDNode>(Node)->getAlignment());
|
2008-05-05 19:05:59 +00:00
|
|
|
}
|
|
|
|
|
2006-04-25 20:13:52 +00:00
|
|
|
/// LowerOperation - Provide custom lowering hooks for some operations.
|
|
|
|
///
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
|
2006-04-25 20:13:52 +00:00
|
|
|
switch (Op.getOpcode()) {
|
|
|
|
default: assert(0 && "Should not custom lower this!");
|
2008-10-20 15:56:33 +00:00
|
|
|
case ISD::ATOMIC_CMP_SWAP_8:
|
|
|
|
case ISD::ATOMIC_CMP_SWAP_16:
|
|
|
|
case ISD::ATOMIC_CMP_SWAP_32:
|
2008-08-28 02:44:49 +00:00
|
|
|
case ISD::ATOMIC_CMP_SWAP_64: return LowerCMP_SWAP(Op,DAG);
|
2008-10-20 15:56:33 +00:00
|
|
|
case ISD::ATOMIC_LOAD_SUB_8:
|
|
|
|
case ISD::ATOMIC_LOAD_SUB_16:
|
2008-09-29 22:25:26 +00:00
|
|
|
case ISD::ATOMIC_LOAD_SUB_32: return LowerLOAD_SUB(Op,DAG);
|
2008-10-02 18:53:47 +00:00
|
|
|
case ISD::ATOMIC_LOAD_SUB_64: return (Subtarget->is64Bit()) ?
|
2008-10-03 22:25:52 +00:00
|
|
|
LowerLOAD_SUB(Op,DAG) :
|
|
|
|
LowerATOMIC_BINARY_64(Op,DAG,
|
2008-10-02 18:53:47 +00:00
|
|
|
X86ISD::ATOMSUB64_DAG);
|
|
|
|
case ISD::ATOMIC_LOAD_AND_64: return LowerATOMIC_BINARY_64(Op,DAG,
|
|
|
|
X86ISD::ATOMAND64_DAG);
|
2008-10-03 22:25:52 +00:00
|
|
|
case ISD::ATOMIC_LOAD_OR_64: return LowerATOMIC_BINARY_64(Op, DAG,
|
2008-10-02 18:53:47 +00:00
|
|
|
X86ISD::ATOMOR64_DAG);
|
|
|
|
  case ISD::ATOMIC_LOAD_XOR_64: return LowerATOMIC_BINARY_64(Op, DAG,
                                                             X86ISD::ATOMXOR64_DAG);
  case ISD::ATOMIC_LOAD_NAND_64:return LowerATOMIC_BINARY_64(Op, DAG,
                                                             X86ISD::ATOMNAND64_DAG);
  case ISD::ATOMIC_LOAD_ADD_64: return LowerATOMIC_BINARY_64(Op, DAG,
                                                             X86ISD::ATOMADD64_DAG);
  case ISD::ATOMIC_SWAP_64:     return LowerATOMIC_BINARY_64(Op, DAG,
                                                             X86ISD::ATOMSWAP64_DAG);
  case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:          return LowerShift(Op, DAG);
  case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:         return LowerFP_TO_SINT(Op, DAG);
  case ISD::FABS:               return LowerFABS(Op, DAG);
  case ISD::FNEG:               return LowerFNEG(Op, DAG);
  case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
  case ISD::SETCC:              return LowerSETCC(Op, DAG);
  case ISD::VSETCC:             return LowerVSETCC(Op, DAG);
  case ISD::SELECT:             return LowerSELECT(Op, DAG);
  case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
  case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
  case ISD::CALL:               return LowerCALL(Op, DAG);
  case ISD::RET:                return LowerRET(Op, DAG);
  case ISD::FORMAL_ARGUMENTS:   return LowerFORMAL_ARGUMENTS(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
                                return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::TRAMPOLINE:         return LowerTRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ:               return LowerCTLZ(Op, DAG);
  case ISD::CTTZ:               return LowerCTTZ(Op, DAG);

  // FIXME: REMOVE THIS WHEN LegalizeDAGTypes lands.
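  // READCYCLECOUNTER (RDTSC) produces a 64-bit result in EDX:EAX, which is
  // an illegal i64 value on 32-bit targets, so it is expanded here instead
  // of being lowered normally.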
  case ISD::READCYCLECOUNTER:
    return SDValue(ExpandREADCYCLECOUNTER(Op.getNode(), DAG), 0);
  }
}

/// ReplaceNodeResults - Replace a node with an illegal result type
/// with a new node built out of custom code.
SDNode *X86TargetLowering::ReplaceNodeResults(SDNode *N, SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default:
    return X86TargetLowering::LowerOperation(SDValue(N, 0), DAG).getNode();
  case ISD::FP_TO_SINT:         return ExpandFP_TO_SINT(N, DAG);
  case ISD::READCYCLECOUNTER:   return ExpandREADCYCLECOUNTER(N, DAG);
  case ISD::ATOMIC_CMP_SWAP_64: return ExpandATOMIC_CMP_SWAP(N, DAG);
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return NULL;
  case X86ISD::BSF:                return "X86ISD::BSF";
  case X86ISD::BSR:                return "X86ISD::BSR";
  case X86ISD::SHLD:               return "X86ISD::SHLD";
  case X86ISD::SHRD:               return "X86ISD::SHRD";
  case X86ISD::FAND:               return "X86ISD::FAND";
  case X86ISD::FOR:                return "X86ISD::FOR";
  case X86ISD::FXOR:               return "X86ISD::FXOR";
  case X86ISD::FSRL:               return "X86ISD::FSRL";
  case X86ISD::FILD:               return "X86ISD::FILD";
  case X86ISD::FILD_FLAG:          return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD:                return "X86ISD::FLD";
  case X86ISD::FST:                return "X86ISD::FST";
  case X86ISD::CALL:               return "X86ISD::CALL";
  case X86ISD::TAILCALL:           return "X86ISD::TAILCALL";
  case X86ISD::RDTSC_DAG:          return "X86ISD::RDTSC_DAG";
  case X86ISD::CMP:                return "X86ISD::CMP";
  case X86ISD::COMI:               return "X86ISD::COMI";
  case X86ISD::UCOMI:              return "X86ISD::UCOMI";
  case X86ISD::SETCC:              return "X86ISD::SETCC";
  case X86ISD::CMOV:               return "X86ISD::CMOV";
  case X86ISD::BRCOND:             return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG:           return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS:           return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS:           return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg:      return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper:            return "X86ISD::Wrapper";
  case X86ISD::PEXTRB:             return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW:             return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS:           return "X86ISD::INSERTPS";
  case X86ISD::PINSRB:             return "X86ISD::PINSRB";
  case X86ISD::PINSRW:             return "X86ISD::PINSRW";
  case X86ISD::FMAX:               return "X86ISD::FMAX";
  case X86ISD::FMIN:               return "X86ISD::FMIN";
  case X86ISD::FRSQRT:             return "X86ISD::FRSQRT";
  case X86ISD::FRCP:               return "X86ISD::FRCP";
  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
  case X86ISD::THREAD_POINTER:     return "X86ISD::THREAD_POINTER";
  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";
  case X86ISD::LCMPXCHG_DAG:       return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG:      return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::ATOMADD64_DAG:      return "X86ISD::ATOMADD64_DAG";
  case X86ISD::ATOMSUB64_DAG:      return "X86ISD::ATOMSUB64_DAG";
  case X86ISD::ATOMOR64_DAG:       return "X86ISD::ATOMOR64_DAG";
  case X86ISD::ATOMXOR64_DAG:      return "X86ISD::ATOMXOR64_DAG";
  case X86ISD::ATOMAND64_DAG:      return "X86ISD::ATOMAND64_DAG";
  case X86ISD::ATOMNAND64_DAG:     return "X86ISD::ATOMNAND64_DAG";
  case X86ISD::VZEXT_MOVL:         return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD:         return "X86ISD::VZEXT_LOAD";
  case X86ISD::VSHL:               return "X86ISD::VSHL";
  case X86ISD::VSRL:               return "X86ISD::VSRL";
  case X86ISD::CMPPD:              return "X86ISD::CMPPD";
  case X86ISD::CMPPS:              return "X86ISD::CMPPS";
  case X86ISD::PCMPEQB:            return "X86ISD::PCMPEQB";
  case X86ISD::PCMPEQW:            return "X86ISD::PCMPEQW";
  case X86ISD::PCMPEQD:            return "X86ISD::PCMPEQD";
  case X86ISD::PCMPEQQ:            return "X86ISD::PCMPEQQ";
  case X86ISD::PCMPGTB:            return "X86ISD::PCMPGTB";
  case X86ISD::PCMPGTW:            return "X86ISD::PCMPGTW";
  case X86ISD::PCMPGTD:            return "X86ISD::PCMPGTD";
  case X86ISD::PCMPGTQ:            return "X86ISD::PCMPGTQ";
  }
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              const Type *Ty) const {
  // X86 supports extremely general addressing modes.

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (AM.BaseOffs < -(1LL << 31) || AM.BaseOffs > (1LL << 31) - 1)
    return false;

  if (AM.BaseGV) {
    // We can only fold this if we don't need an extra load.
    if (Subtarget->GVRequiresExtraLoad(AM.BaseGV, getTargetMachine(), false))
      return false;

    // X86-64 only supports addr of globals in small code model.
    if (Subtarget->is64Bit()) {
      if (getTargetMachine().getCodeModel() != CodeModel::Small)
        return false;
      // If lower 4G is not available, then we must use rip-relative addressing.
      if (AM.BaseOffs || AM.Scale > 1)
        return false;
    }
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg.  Only accept if there is
    // no basereg yet.
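    // e.g. a scale of 9 is only encodable as "lea (%reg,%reg,8)", where the
    // index register doubles as the base, so the base-register slot must
    // still be free.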
    if (AM.HasBaseReg)
      return false;
    break;
  default:  // Other stuff never works.
    return false;
  }

  return true;
}

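// Truncating between two native integer widths is free on x86: the narrower
// value is just the low subregister of the wider one (e.g. %eax within %rax),
// so no instruction is needed.  i64 sources only qualify on x86-64, where
// 64-bit registers exist.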
bool X86TargetLowering::isTruncateFree(const Type *Ty1, const Type *Ty2) const {
  if (!Ty1->isInteger() || !Ty2->isInteger())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return Subtarget->is64Bit() || NumBits1 < 64;
}

bool X86TargetLowering::isTruncateFree(MVT VT1, MVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  if (NumBits1 <= NumBits2)
    return false;
  return Subtarget->is64Bit() || NumBits1 < 64;
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(SDValue Mask, MVT VT) const {
  // Only do shuffles on 128-bit vector types for now.
  if (VT.getSizeInBits() == 64) return false;
  return (Mask.getNode()->getNumOperands() <= 4 ||
          isIdentityMask(Mask.getNode()) ||
          isIdentityMask(Mask.getNode(), true) ||
          isSplatMask(Mask.getNode()) ||
          isPSHUFHW_PSHUFLWMask(Mask.getNode()) ||
          X86::isUNPCKLMask(Mask.getNode()) ||
          X86::isUNPCKHMask(Mask.getNode()) ||
          X86::isUNPCKL_v_undef_Mask(Mask.getNode()) ||
          X86::isUNPCKH_v_undef_Mask(Mask.getNode()));
}

bool
X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDValue> &BVOps,
                                          MVT EVT, SelectionDAG &DAG) const {
  unsigned NumElts = BVOps.size();
  // Only do shuffles on 128-bit vector types for now.
  if (EVT.getSizeInBits() * NumElts == 64) return false;
  if (NumElts == 2) return true;
  if (NumElts == 4) {
    return (isMOVLMask(&BVOps[0], 4)  ||
            isCommutedMOVL(&BVOps[0], 4, true) ||
            isSHUFPMask(&BVOps[0], 4) ||
            isCommutedSHUFP(&BVOps[0], 4));
  }
  return false;
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

// private utility function
MachineBasicBlock *
X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpc,
                                                       unsigned immOpc,
                                                       unsigned LoadOpc,
                                                       unsigned CXchgOpc,
                                                       unsigned copyOpc,
                                                       unsigned notOpc,
                                                       unsigned EAXreg,
                                                       TargetRegisterClass *RC,
                                                       bool invSrc) {
  // For the atomic bitwise operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld  t1 = [bitinstr.addr]
  //     op  t2 = t1, [bitinstr.val]
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t2  [EAX is implicit]
  //     bz  newMBB
  //     fallthrough -->nextMBB
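  //
  // LCMPXCHG (lock cmpxchg) only performs the store when the value in EAX
  // still matches memory; on failure ZF is clear, so the JNE emitted below
  // branches back to newMBB and the operation is retried with a freshly
  // loaded value.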
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Move all successors of thisMBB to nextMBB
  nextMBB->transferSuccessors(thisMBB);

  // Update thisMBB to fall through to newMBB
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  // Insert instructions into newMBB based on incoming instruction
  assert(bInstr->getNumOperands() < 8 && "unexpected number of operands");
  MachineOperand& destOper = bInstr->getOperand(0);
  MachineOperand* argOpers[6];
  int numArgs = bInstr->getNumOperands() - 1;
  for (int i=0; i < numArgs; ++i)
    argOpers[i] = &bInstr->getOperand(i+1);

  // x86 address has 4 operands: base, index, scale, and displacement
  int lastAddrIndx = 3; // [0,3]
  int valArgIndx = 4;

  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(newMBB, TII->get(LoadOpc), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  unsigned tt = F->getRegInfo().createVirtualRegister(RC);
  if (invSrc) {
    MIB = BuildMI(newMBB, TII->get(notOpc), tt).addReg(t1);
  }
  else
    tt = t1;

  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, TII->get(regOpc), t2);
  else
    MIB = BuildMI(newMBB, TII->get(immOpc), t2);
  MIB.addReg(tt);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  MIB = BuildMI(newMBB, TII->get(copyOpc), EAXreg);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, TII->get(CXchgOpc));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t2);
  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).addMemOperand(*F, *bInstr->memoperands_begin());

  MIB = BuildMI(newMBB, TII->get(copyOpc), destOper.getReg());
  MIB.addReg(EAXreg);

  // insert branch
  BuildMI(newMBB, TII->get(X86::JNE)).addMBB(newMBB);

  F->DeleteMachineInstr(bInstr);   // The pseudo instruction is gone now.
  return nextMBB;
}

// private utility function: 64 bit atomics on 32 bit host.
MachineBasicBlock *
X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
                                                       MachineBasicBlock *MBB,
                                                       unsigned regOpcL,
                                                       unsigned regOpcH,
                                                       unsigned immOpcL,
                                                       unsigned immOpcH,
                                                       bool invSrc) {
  // For the atomic bitwise operator, we generate
  //   thisMBB (instructions are in pairs, except cmpxchg8b)
  //     ld t1,t2 = [bitinstr.addr]
  //   newMBB:
  //     out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
  //     op  t5, t6 <- out1, out2, [bitinstr.val]
  //      (for SWAP, substitute:  mov t5, t6 <- [bitinstr.val])
  //     mov ECX, EBX <- t5, t6
  //     mov EAX, EDX <- t1, t2
  //     cmpxchg8b [bitinstr.addr]  [EAX, EDX, EBX, ECX implicit]
  //     mov t3, t4 <- EAX, EDX
  //     bz  newMBB
  //     result in out1, out2
  //     fallthrough -->nextMBB
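  //
  // cmpxchg8b compares EDX:EAX against the 8-byte memory operand and, if they
  // match, stores ECX:EBX there; otherwise the current memory value is loaded
  // into EDX:EAX.  ZF reports success, so the JNE below retries the loop.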

  const TargetRegisterClass *RC = X86::GR32RegisterClass;
  const unsigned LoadOpc = X86::MOV32rm;
  const unsigned copyOpc = X86::MOV32rr;
  const unsigned NotOpc = X86::NOT32r;
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Move all successors of thisMBB to nextMBB
  nextMBB->transferSuccessors(thisMBB);

  // Update thisMBB to fall through to newMBB
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  // Insert instructions into newMBB based on incoming instruction
  // There are 8 "real" operands plus 9 implicit def/uses, ignored here.
  assert(bInstr->getNumOperands() < 18 && "unexpected number of operands");
  MachineOperand& dest1Oper = bInstr->getOperand(0);
  MachineOperand& dest2Oper = bInstr->getOperand(1);
  MachineOperand* argOpers[6];
  for (int i=0; i < 6; ++i)
    argOpers[i] = &bInstr->getOperand(i+2);

  // x86 address has 4 operands: base, index, scale, and displacement
  int lastAddrIndx = 3; // [0,3]

  unsigned t1 = F->getRegInfo().createVirtualRegister(RC);
  MachineInstrBuilder MIB = BuildMI(thisMBB, TII->get(LoadOpc), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  MIB = BuildMI(thisMBB, TII->get(LoadOpc), t2);
  // add 4 to displacement.
  for (int i=0; i <= lastAddrIndx-1; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MachineOperand newOp3 = *(argOpers[3]);
  if (newOp3.isImm())
    newOp3.setImm(newOp3.getImm()+4);
  else
    newOp3.setOffset(newOp3.getOffset()+4);
  (*MIB).addOperand(newOp3);

  // t3/4 are defined later, at the bottom of the loop
  unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t4 = F->getRegInfo().createVirtualRegister(RC);
  BuildMI(newMBB, TII->get(X86::PHI), dest1Oper.getReg())
    .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB);
  BuildMI(newMBB, TII->get(X86::PHI), dest2Oper.getReg())
    .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB);

  unsigned tt1 = F->getRegInfo().createVirtualRegister(RC);
  unsigned tt2 = F->getRegInfo().createVirtualRegister(RC);
  if (invSrc) {
    MIB = BuildMI(newMBB, TII->get(NotOpc), tt1).addReg(t1);
    MIB = BuildMI(newMBB, TII->get(NotOpc), tt2).addReg(t2);
  } else {
    tt1 = t1;
    tt2 = t2;
  }

  assert((argOpers[4]->isReg() || argOpers[4]->isImm()) &&
         "invalid operand");
  unsigned t5 = F->getRegInfo().createVirtualRegister(RC);
  unsigned t6 = F->getRegInfo().createVirtualRegister(RC);
  if (argOpers[4]->isReg())
    MIB = BuildMI(newMBB, TII->get(regOpcL), t5);
  else
    MIB = BuildMI(newMBB, TII->get(immOpcL), t5);
  if (regOpcL != X86::MOV32rr)
    MIB.addReg(tt1);
  (*MIB).addOperand(*argOpers[4]);
  assert(argOpers[5]->isReg() == argOpers[4]->isReg());
  assert(argOpers[5]->isImm() == argOpers[4]->isImm());
  if (argOpers[5]->isReg())
    MIB = BuildMI(newMBB, TII->get(regOpcH), t6);
  else
    MIB = BuildMI(newMBB, TII->get(immOpcH), t6);
  if (regOpcH != X86::MOV32rr)
    MIB.addReg(tt2);
  (*MIB).addOperand(*argOpers[5]);

  MIB = BuildMI(newMBB, TII->get(copyOpc), X86::EAX);
  MIB.addReg(t1);
  MIB = BuildMI(newMBB, TII->get(copyOpc), X86::EDX);
  MIB.addReg(t2);

  MIB = BuildMI(newMBB, TII->get(copyOpc), X86::EBX);
  MIB.addReg(t5);
  MIB = BuildMI(newMBB, TII->get(copyOpc), X86::ECX);
  MIB.addReg(t6);

  MIB = BuildMI(newMBB, TII->get(X86::LCMPXCHG8B));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).addMemOperand(*F, *bInstr->memoperands_begin());

  MIB = BuildMI(newMBB, TII->get(copyOpc), t3);
  MIB.addReg(X86::EAX);
  MIB = BuildMI(newMBB, TII->get(copyOpc), t4);
  MIB.addReg(X86::EDX);

  // insert branch
  BuildMI(newMBB, TII->get(X86::JNE)).addMBB(newMBB);

  F->DeleteMachineInstr(bInstr);   // The pseudo instruction is gone now.
  return nextMBB;
}

// private utility function
MachineBasicBlock *
X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr,
                                                      MachineBasicBlock *MBB,
                                                      unsigned cmovOpc) {
  // For the atomic min/max operator, we generate
  //   thisMBB:
  //   newMBB:
  //     ld t1 = [min/max.addr]
  //     mov t2 = [min/max.val]
  //     cmp  t1, t2
  //     cmov[cond] t2 = t1
  //     mov EAX = t1
  //     lcs dest = [bitinstr.addr], t2  [EAX is implicit]
  //     bz   newMBB
  //     fallthrough -->nextMBB
  //
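  // The caller picks the comparison via cmovOpc (e.g. CMOVL32rr for ATOMMIN32,
  // CMOVB32rr for ATOMUMIN32), so this one expansion covers signed and
  // unsigned min/max alike.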
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;

  /// First build the CFG
  MachineFunction *F = MBB->getParent();
  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, newMBB);
  F->insert(MBBIter, nextMBB);

  // Move all successors of thisMBB to nextMBB
  nextMBB->transferSuccessors(thisMBB);

  // Update thisMBB to fall through to newMBB
  thisMBB->addSuccessor(newMBB);

  // newMBB jumps to itself and falls through to nextMBB
  newMBB->addSuccessor(nextMBB);
  newMBB->addSuccessor(newMBB);

  // Insert instructions into newMBB based on incoming instruction
  assert(mInstr->getNumOperands() < 8 && "unexpected number of operands");
  MachineOperand& destOper = mInstr->getOperand(0);
  MachineOperand* argOpers[6];
  int numArgs = mInstr->getNumOperands() - 1;
  for (int i=0; i < numArgs; ++i)
    argOpers[i] = &mInstr->getOperand(i+1);

  // x86 address has 4 operands: base, index, scale, and displacement
  int lastAddrIndx = 3; // [0,3]
  int valArgIndx = 4;

  unsigned t1 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  MachineInstrBuilder MIB = BuildMI(newMBB, TII->get(X86::MOV32rm), t1);
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);

  // We only support register and immediate values
  assert((argOpers[valArgIndx]->isReg() ||
          argOpers[valArgIndx]->isImm()) &&
         "invalid operand");

  unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  if (argOpers[valArgIndx]->isReg())
    MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), t2);
  else
    MIB = BuildMI(newMBB, TII->get(X86::MOV32ri), t2);
  (*MIB).addOperand(*argOpers[valArgIndx]);

  MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), X86::EAX);
  MIB.addReg(t1);

  MIB = BuildMI(newMBB, TII->get(X86::CMP32rr));
  MIB.addReg(t1);
  MIB.addReg(t2);

  // Generate movc
  unsigned t3 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
  MIB = BuildMI(newMBB, TII->get(cmovOpc), t3);
  MIB.addReg(t2);
  MIB.addReg(t1);

  // Compare and exchange if no one has modified the memory location
  MIB = BuildMI(newMBB, TII->get(X86::LCMPXCHG32));
  for (int i=0; i <= lastAddrIndx; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MIB.addReg(t3);
  assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand");
  (*MIB).addMemOperand(*F, *mInstr->memoperands_begin());

  MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), destOper.getReg());
  MIB.addReg(X86::EAX);

  // insert branch
  BuildMI(newMBB, TII->get(X86::JNE)).addMBB(newMBB);

  F->DeleteMachineInstr(mInstr);   // The pseudo instruction is gone now.
  return nextMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  switch (MI->getOpcode()) {
  default: assert(false && "Unexpected instr type to insert");
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
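    // These pseudos exist because there is no conditional-move instruction
    // for x87/SSE registers, so the select is emitted as an explicit branch
    // diamond instead.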
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = BB;
    ++It;

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
    unsigned Opc =
      X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
    BuildMI(BB, TII->get(Opc)).addMBB(sinkMBB);
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);
    // Update machine-CFG edges by transferring all successors of the current
    // block to the new block which will contain the Phi node for the select.
    sinkMBB->transferSuccessors(BB);

    // Add the true and fallthrough blocks as its successors.
    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(BB, TII->get(X86::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

    F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
    return BB;
  }

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    MachineFunction *F = BB->getParent();
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2);
    addFrameReference(BuildMI(BB, TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(X86::GR16RegisterClass);
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16rm), OldCW), CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);
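    // (0xC7F leaves all exceptions masked and sets the rounding-control
    // bits 11:10 to 11b, i.e. round toward zero, which is what the FIST
    // stores below need for a C-style truncating conversion.)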

    // Reload the modified control word now...
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(BB, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: assert(0 && "illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(BB, TII->get(Opc)), AM)
      .addReg(MI->getOperand(4).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(BB, TII->get(X86::FLDCW16m)), CWFrameIdx);

    F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
    return BB;
  }
  case X86::ATOMAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32, X86::MOV32rr,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr,
                                               X86::OR32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32, X86::MOV32rr,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMXOR32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
                                               X86::XOR32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32, X86::MOV32rr,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass);
  case X86::ATOMNAND32:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
                                               X86::AND32ri, X86::MOV32rm,
                                               X86::LCMPXCHG32, X86::MOV32rr,
                                               X86::NOT32r, X86::EAX,
                                               X86::GR32RegisterClass, true);
  case X86::ATOMMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
  case X86::ATOMMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr);
  case X86::ATOMUMIN32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr);
  case X86::ATOMUMAX32:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr);

  case X86::ATOMAND16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
                                               X86::AND16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16, X86::MOV16rr,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMOR16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr,
                                               X86::OR16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16, X86::MOV16rr,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMXOR16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr,
                                               X86::XOR16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16, X86::MOV16rr,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass);
  case X86::ATOMNAND16:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr,
                                               X86::AND16ri, X86::MOV16rm,
                                               X86::LCMPXCHG16, X86::MOV16rr,
                                               X86::NOT16r, X86::AX,
                                               X86::GR16RegisterClass, true);
  case X86::ATOMMIN16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr);
  case X86::ATOMMAX16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr);
  case X86::ATOMUMIN16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr);
  case X86::ATOMUMAX16:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr);

  case X86::ATOMAND8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
                                               X86::AND8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8, X86::MOV8rr,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMOR8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr,
                                               X86::OR8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8, X86::MOV8rr,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMXOR8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr,
                                               X86::XOR8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8, X86::MOV8rr,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass);
  case X86::ATOMNAND8:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr,
                                               X86::AND8ri, X86::MOV8rm,
                                               X86::LCMPXCHG8, X86::MOV8rr,
                                               X86::NOT8r, X86::AL,
                                               X86::GR8RegisterClass, true);
  // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way.
  // This group is for 64-bit host.
  case X86::ATOMAND64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
                                               X86::AND64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64, X86::MOV64rr,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMOR64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr,
                                               X86::OR64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64, X86::MOV64rr,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMXOR64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr,
                                               X86::XOR64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64, X86::MOV64rr,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass);
  case X86::ATOMNAND64:
    return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr,
                                               X86::AND64ri32, X86::MOV64rm,
                                               X86::LCMPXCHG64, X86::MOV64rr,
                                               X86::NOT64r, X86::RAX,
                                               X86::GR64RegisterClass, true);
  case X86::ATOMMIN64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr);
  case X86::ATOMMAX64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr);
  case X86::ATOMUMIN64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr);
  case X86::ATOMUMAX64:
    return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr);

  // This group does 64-bit operations on a 32-bit host.
  case X86::ATOMAND6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::AND32rr, X86::AND32rr,
                                               X86::AND32ri, X86::AND32ri,
                                               false);
  case X86::ATOMOR6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::OR32rr, X86::OR32rr,
                                               X86::OR32ri, X86::OR32ri,
                                               false);
  case X86::ATOMXOR6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::XOR32rr, X86::XOR32rr,
                                               X86::XOR32ri, X86::XOR32ri,
                                               false);
  case X86::ATOMNAND6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::AND32rr, X86::AND32rr,
                                               X86::AND32ri, X86::AND32ri,
                                               true);
  case X86::ATOMADD6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::ADD32rr, X86::ADC32rr,
                                               X86::ADD32ri, X86::ADC32ri,
                                               false);
  case X86::ATOMSUB6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::SUB32rr, X86::SBB32rr,
                                               X86::SUB32ri, X86::SBB32ri,
                                               false);
  case X86::ATOMSWAP6432:
    return EmitAtomicBit6432WithCustomInserter(MI, BB,
                                               X86::MOV32rr, X86::MOV32rr,
                                               X86::MOV32ri, X86::MOV32ri,
                                               false);
  }
}

//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//

void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                       const APInt &Mask,
                                                       APInt &KnownZero,
                                                       APInt &KnownOne,
                                                       const SelectionDAG &DAG,
                                                       unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);   // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::SETCC:
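    // SETCC materializes the condition as 0 or 1 in a full register, so
    // every bit above bit 0 is known to be zero.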
    KnownZero |= APInt::getHighBitsSet(Mask.getBitWidth(),
                                       Mask.getBitWidth() - 1);
    break;
  }
}

/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
                                       GlobalValue* &GA, int64_t &Offset) const {
  if (N->getOpcode() == X86ISD::Wrapper) {
    if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}

static bool isBaseAlignmentOfN(unsigned N, SDNode *Base,
                               const TargetLowering &TLI) {
  GlobalValue *GV;
  int64_t Offset = 0;
  if (TLI.isGAPlusOffset(Base, GV, Offset))
    return (GV->getAlignment() >= N && (Offset % N) == 0);
  // DAG combine handles the stack object case.
  return false;
}

static bool EltsFromConsecutiveLoads(SDNode *N, SDValue PermMask,
                                     unsigned NumElems, MVT EVT,
                                     SDNode *&Base,
                                     SelectionDAG &DAG, MachineFrameInfo *MFI,
                                     const TargetLowering &TLI) {
  Base = NULL;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Idx = PermMask.getOperand(i);
    if (Idx.getOpcode() == ISD::UNDEF) {
      if (!Base)
        return false;
      continue;
    }

    SDValue Elt = DAG.getShuffleScalarElt(N, i);
    if (!Elt.getNode() ||
        (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
      return false;
    if (!Base) {
      Base = Elt.getNode();
      if (Base->getOpcode() == ISD::UNDEF)
        return false;
      continue;
    }
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;

    if (!TLI.isConsecutiveLoad(Elt.getNode(), Base,
                               EVT.getSizeInBits()/8, i, MFI))
      return false;
  }
  return true;
}

/// PerformShuffleCombine - Combine a vector_shuffle that is equal to
/// build_vector load1, load2, load3, load4, <0, 1, 2, 3> into a 128-bit load
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                     const TargetLowering &TLI) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MVT VT = N->getValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue PermMask = N->getOperand(2);
  unsigned NumElems = PermMask.getNumOperands();
  SDNode *Base = NULL;
  if (!EltsFromConsecutiveLoads(N, PermMask, NumElems, EVT, Base,
                                DAG, MFI, TLI))
    return SDValue();

  LoadSDNode *LD = cast<LoadSDNode>(Base);
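  // If the base pointer is known to be 16-byte aligned, the new vector load
  // gets its natural (aligned) alignment; otherwise we carry over the
  // original element load's smaller alignment so an unaligned vector load
  // is emitted.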
  if (isBaseAlignmentOfN(16, Base->getOperand(1).getNode(), TLI))
    return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                       LD->getSrcValueOffset(), LD->isVolatile());
  return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
                     LD->getSrcValueOffset(), LD->isVolatile(),
                     LD->getAlignment());
}

/// PerformBuildVectorCombine - build_vector 0,(load i64 / f64) -> movq / movsd.
static SDValue PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG,
                                         const X86Subtarget *Subtarget,
                                         const TargetLowering &TLI) {
  unsigned NumOps = N->getNumOperands();

  // Ignore single operand BUILD_VECTOR.
  if (NumOps == 1)
    return SDValue();

  MVT VT = N->getValueType(0);
  MVT EVT = VT.getVectorElementType();
  if ((EVT != MVT::i64 && EVT != MVT::f64) || Subtarget->is64Bit())
    // We are looking for load i64 and zero extend. We want to transform
    // it before legalizer has a chance to expand it. Also look for i64
    // BUILD_PAIR bit casted to f64.
    return SDValue();
  // This must be an insertion into a zero vector.
  SDValue HighElt = N->getOperand(1);
  if (!isZeroNode(HighElt))
    return SDValue();

  // Value must be a load.
  SDNode *Base = N->getOperand(0).getNode();
  if (!isa<LoadSDNode>(Base)) {
    if (Base->getOpcode() != ISD::BIT_CONVERT)
      return SDValue();
    Base = Base->getOperand(0).getNode();
    if (!isa<LoadSDNode>(Base))
      return SDValue();
  }

  // Transform it into VZEXT_LOAD addr.
  LoadSDNode *LD = cast<LoadSDNode>(Base);

  // Load must not be an extload.
  if (LD->getExtensionType() != ISD::NON_EXTLOAD)
    return SDValue();

  SDVTList Tys = DAG.getVTList(VT, MVT::Other);
  SDValue Ops[] = { LD->getChain(), LD->getBasePtr() };
  SDValue ResNode = DAG.getNode(X86ISD::VZEXT_LOAD, Tys, Ops, 2);
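  // The new node carries its own chain result; redirect users of the original
  // load's chain (result value 1) to it so memory ordering is preserved.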
  DAG.ReplaceAllUsesOfValueWith(SDValue(Base, 1), ResNode.getValue(1));
  return ResNode;
}

/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget *Subtarget) {
  SDValue Cond = N->getOperand(0);

  // If we have SSE[12] support, try to form min/max nodes.
|
|
|
|
if (Subtarget->hasSSE2() &&
|
|
|
|
(N->getValueType(0) == MVT::f32 || N->getValueType(0) == MVT::f64)) {
|
|
|
|
if (Cond.getOpcode() == ISD::SETCC) {
|
|
|
|
// Get the LHS/RHS of the select.
|
2008-07-27 21:46:04 +00:00
|
|
|
SDValue LHS = N->getOperand(1);
|
|
|
|
SDValue RHS = N->getOperand(2);
|
2006-10-04 06:57:07 +00:00
|
|
|
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2006-11-10 21:43:37 +00:00
|
|
|
unsigned Opcode = 0;
|
2006-10-04 06:57:07 +00:00
|
|
|
if (LHS == Cond.getOperand(0) && RHS == Cond.getOperand(1)) {
|
2006-10-05 04:11:26 +00:00
|
|
|
switch (CC) {
|
|
|
|
default: break;
|
|
|
|
case ISD::SETOLE: // (X <= Y) ? X : Y -> min
|
|
|
|
case ISD::SETULE:
|
|
|
|
case ISD::SETLE:
|
|
|
|
if (!UnsafeFPMath) break;
|
|
|
|
// FALL THROUGH.
|
|
|
|
case ISD::SETOLT: // (X olt/lt Y) ? X : Y -> min
|
|
|
|
case ISD::SETLT:
|
2006-11-10 21:43:37 +00:00
|
|
|
Opcode = X86ISD::FMIN;
|
2006-10-05 04:11:26 +00:00
|
|
|
break;
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2006-10-05 04:11:26 +00:00
|
|
|
case ISD::SETOGT: // (X > Y) ? X : Y -> max
|
|
|
|
case ISD::SETUGT:
|
|
|
|
case ISD::SETGT:
|
|
|
|
if (!UnsafeFPMath) break;
|
|
|
|
// FALL THROUGH.
|
|
|
|
case ISD::SETUGE: // (X uge/ge Y) ? X : Y -> max
|
|
|
|
case ISD::SETGE:
|
2006-11-10 21:43:37 +00:00
|
|
|
Opcode = X86ISD::FMAX;
|
2006-10-05 04:11:26 +00:00
|
|
|
break;
|
|
|
|
}
|
2006-10-04 06:57:07 +00:00
|
|
|
} else if (LHS == Cond.getOperand(1) && RHS == Cond.getOperand(0)) {
|
2006-10-05 04:11:26 +00:00
|
|
|
switch (CC) {
|
|
|
|
default: break;
|
|
|
|
case ISD::SETOGT: // (X > Y) ? Y : X -> min
|
|
|
|
case ISD::SETUGT:
|
|
|
|
case ISD::SETGT:
|
|
|
|
if (!UnsafeFPMath) break;
|
|
|
|
// FALL THROUGH.
|
|
|
|
case ISD::SETUGE: // (X uge/ge Y) ? Y : X -> min
|
|
|
|
case ISD::SETGE:
|
2006-11-10 21:43:37 +00:00
|
|
|
Opcode = X86ISD::FMIN;
|
2006-10-05 04:11:26 +00:00
|
|
|
break;
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2006-10-05 04:11:26 +00:00
|
|
|
case ISD::SETOLE: // (X <= Y) ? Y : X -> max
|
|
|
|
case ISD::SETULE:
|
|
|
|
case ISD::SETLE:
|
|
|
|
if (!UnsafeFPMath) break;
|
|
|
|
// FALL THROUGH.
|
|
|
|
case ISD::SETOLT: // (X olt/lt Y) ? Y : X -> max
|
|
|
|
case ISD::SETLT:
|
2006-11-10 21:43:37 +00:00
|
|
|
Opcode = X86ISD::FMAX;
|
2006-10-05 04:11:26 +00:00
|
|
|
break;
|
|
|
|
}
|
2006-10-04 06:57:07 +00:00
|
|
|
}
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2006-11-10 21:43:37 +00:00
|
|
|
if (Opcode)
|
|
|
|
return DAG.getNode(Opcode, N->getValueType(0), LHS, RHS);
|
2006-10-04 06:57:07 +00:00
|
|
|
}
|
2006-11-21 00:01:06 +00:00
|
|
|
|
2006-10-04 06:57:07 +00:00
|
|
|
}
|
|
|
|
|
2008-07-27 21:46:04 +00:00
|
|
|
return SDValue();
|
2006-10-04 06:57:07 +00:00
|
|
|
}
|
|
|
|
|
2008-02-22 02:09:43 +00:00
|
|
|
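// Illustrative sketch: a 64-bit vector (MMX-typed) value that is loaded and
// immediately stored, e.g.
//   (store (v1i64 (load addr1)), addr2)
// is rewritten below to go through integer registers instead: one i64
// load/store on x86-64, or a pair of i32 loads/stores on 32-bit targets, so
// the shared x87/MMX register state is never touched.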
/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS.  This qualifies as a quick hack.
  StoreSDNode *St = cast<StoreSDNode>(N);
  if (St->getValue().getValueType().isVector() &&
      St->getValue().getValueType().getSizeInBits() == 64 &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode* LdVal = St->getValue().getNode();
    LoadSDNode *Ld = 0;
    int TokenFactorIndex = -1;
    SmallVector<SDValue, 8> Ops;
    SDNode* ChainVal = St->getChain().getNode();
    // Must be a store of a load.  We currently handle two cases:  the load
    // is a direct child, and it's under an intervening TokenFactor.  It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i=0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).getNode() == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }
    if (Ld) {
      // If we are a 64-bit capable x86, lower to a single movq load/store pair.
      if (Subtarget->is64Bit()) {
        SDValue NewLd = DAG.getLoad(MVT::i64, Ld->getChain(),
                                    Ld->getBasePtr(), Ld->getSrcValue(),
                                    Ld->getSrcValueOffset(), Ld->isVolatile(),
                                    Ld->getAlignment());
        SDValue NewChain = NewLd.getValue(1);
        if (TokenFactorIndex != -1) {
          Ops.push_back(NewChain);
          NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                                 Ops.size());
        }
        return DAG.getStore(NewChain, NewLd, St->getBasePtr(),
                            St->getSrcValue(), St->getSrcValueOffset(),
                            St->isVolatile(), St->getAlignment());
      }

      // Otherwise, lower to two 32-bit copies.
      SDValue LoAddr = Ld->getBasePtr();
      SDValue HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                                   DAG.getConstant(4, MVT::i32));

      SDValue LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
                                 Ld->getSrcValue(), Ld->getSrcValueOffset(),
                                 Ld->isVolatile(), Ld->getAlignment());
      SDValue HiLd = DAG.getLoad(MVT::i32, Ld->getChain(), HiAddr,
                                 Ld->getSrcValue(), Ld->getSrcValueOffset()+4,
                                 Ld->isVolatile(),
                                 MinAlign(Ld->getAlignment(), 4));

      SDValue NewChain = LoLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(LoLd);
        Ops.push_back(HiLd);
        NewChain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Ops[0],
                               Ops.size());
      }

      LoAddr = St->getBasePtr();
      HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
                           DAG.getConstant(4, MVT::i32));

      SDValue LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
                                  St->getSrcValue(), St->getSrcValueOffset(),
                                  St->isVolatile(), St->getAlignment());
      SDValue HiSt = DAG.getStore(NewChain, HiLd, HiAddr,
                                  St->getSrcValue(),
                                  St->getSrcValueOffset() + 4,
                                  St->isVolatile(),
                                  MinAlign(St->getAlignment(), 4));
      return DAG.getNode(ISD::TokenFactor, MVT::Other, LoSt, HiSt);
    }
  }
  return SDValue();
}

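// Illustrative example (the motivating case for these combines): together
// with the FAND combine that follows, folding F[X]OR/FAND of +0.0 lets
// copysign(0.0, X) compile to a single andpd of X with the sign-bit mask,
// instead of materializing a zero and merging the two halves with orpd.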
/// PerformFORCombine - Do target-specific dag combines on X86ISD::FOR and
/// X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDValue();
}

/// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}

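// PerformDAGCombine is the TargetLowering hook the generic DAG combiner calls
// for nodes the target has asked to see (and for target-specific X86ISD
// opcodes); it simply dispatches on the opcode to the Perform*Combine helpers
// defined above.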
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
  case ISD::BUILD_VECTOR:
    return PerformBuildVectorCombine(N, DAG, Subtarget, *this);
  case ISD::SELECT:         return PerformSELECTCombine(N, DAG, Subtarget);
  case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'A':
      return C_Register;
    case 'f':
    case 'r':
    case 'R':
    case 'l':
    case 'q':
    case 'Q':
    case 'x':
    case 'y':
    case 'Y':
      return C_RegisterClass;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

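// Illustrative example: for inline asm such as
//   unsigned long long t;  __asm__("rdtsc" : "=A"(t));
// the 'A' constraint above is classified as C_Register because it names the
// fixed EDX:EAX register pair, whereas letters like 'r', 'x' or 'Y' only pin
// the operand to a register class (C_RegisterClass).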
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(MVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

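// Illustrative note: the immediate constraints handled below follow the GCC
// x86 conventions, e.g.
//   'I' : constant in [0, 31]   (e.g. 32-bit shift counts)
//   'J' : constant in [0, 63]   (e.g. 64-bit shift counts)
//   'N' : constant in [0, 255]  (e.g. in/out port numbers)
// A constant outside the range is rejected (nothing is added to Ops).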
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     char Constraint,
                                                     bool hasMemory,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result(0, 0);

  switch (Constraint) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      Result = DAG.getTargetConstant(CST->getZExtValue(), Op.getValueType());
      break;
    }

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
    int64_t Offset = 0;

    // Match either (GA) or (GA+C)
    if (GA) {
      Offset = GA->getOffset();
    } else if (Op.getOpcode() == ISD::ADD) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
      if (C && GA) {
        Offset = GA->getOffset()+C->getZExtValue();
      } else {
        C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
        if (C && GA)
          Offset = GA->getOffset()+C->getZExtValue();
        else
          C = 0, GA = 0;
      }
    }

    if (GA) {
      if (hasMemory)
        Op = LowerGlobalAddress(GA->getGlobal(), Offset, DAG);
      else
        Op = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
                                        Offset);
      Result = Op;
      break;
    }

    // Otherwise, not valid for this mode.
    return;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
                                                      Ops, DAG);
}

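// Illustrative example: for the 'q'/'Q' constraints the helper below returns
// an explicit candidate list at the requested width, e.g. an i8 operand may
// go in AL, DL, CL or BL, and an i32 operand in EAX, EDX, ECX or EBX.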
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  MVT VT) const {
  if (Constraint.size() == 1) {
    // FIXME: not handling fp-stack yet!
    switch (Constraint[0]) {      // GCC X86 Constraint Letters
    default: break;  // Unknown constraint letter
    case 'q':   // Q_REGS (GENERAL_REGS in 64-bit mode)
    case 'Q':   // Q_REGS
      if (VT == MVT::i32)
        return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
      else if (VT == MVT::i16)
        return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
      else if (VT == MVT::i8)
        return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
      else if (VT == MVT::i64)
        return make_vector<unsigned>(X86::RAX, X86::RDX, X86::RCX, X86::RBX, 0);
      break;
    }
  }

  return std::vector<unsigned>();
}

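// Illustrative example: an explicit single-register constraint such as "{ax}"
// used with an i32 operand is resolved below to EAX in GR32 (AL for i8, RAX
// for i64) rather than being split into an {ax},{dx} pair.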
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    case 'r':   // GENERAL_REGS
    case 'R':   // LEGACY_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8)
        return std::make_pair(0U, X86::GR8RegisterClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, X86::GR16RegisterClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, X86::GR32RegisterClass);
      return std::make_pair(0U, X86::GR64RegisterClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP32RegisterClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, X86::RFP64RegisterClass);
      return std::make_pair(0U, X86::RFP80RegisterClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, X86::VR64RegisterClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.getSimpleVT()) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, X86::FR32RegisterClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, X86::FR64RegisterClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, X86::VR128RegisterClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (Res.second == 0) {
    // GCC calls "st(0)" just plain "st".
    if (StringsEqualNoCase("{st}", Constraint)) {
      Res.first = X86::ST0;
      Res.second = X86::RFP80RegisterClass;
    }
    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = X86::GRADRegisterClass;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp".  If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == X86::GR16RegisterClass) {
    if (VT == MVT::i8) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR8RegisterClass;
      }
    } else if (VT == MVT::i32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR32RegisterClass;
      }
    } else if (VT == MVT::i64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = X86::GR64RegisterClass;
      }
    }
  } else if (Res.second == X86::FR32RegisterClass ||
             Res.second == X86::FR64RegisterClass ||
             Res.second == X86::VR128RegisterClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class.  This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
    if (VT == MVT::f32)
      Res.second = X86::FR32RegisterClass;
    else if (VT == MVT::f64)
      Res.second = X86::FR64RegisterClass;
    else if (X86::VR128RegisterClass->hasType(VT))
      Res.second = X86::VR128RegisterClass;
  }

  return Res;
}

//===----------------------------------------------------------------------===//
//                           X86 Widen vector type
//===----------------------------------------------------------------------===//

/// getWidenVectorType: given a vector type, returns the type to widen
/// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
/// If there is no vector type that we want to widen to, returns MVT::Other.
/// When and where to widen is target dependent based on the cost of
/// scalarizing vs using the wider vector type.

MVT X86TargetLowering::getWidenVectorType(MVT VT) {
  assert(VT.isVector());
  if (isTypeLegal(VT))
    return VT;

  // TODO: In computeRegisterProperty, we can compute the list of legal vector
  //       type based on element type.  This would speed up our search (though
  //       it may not be worth it since the size of the list is relatively
  //       small).
  MVT EltVT = VT.getVectorElementType();
  unsigned NElts = VT.getVectorNumElements();

  // On X86, it makes sense to widen any vector wider than 1 element.
  if (NElts <= 1)
    return MVT::Other;

  for (unsigned nVT = MVT::FIRST_VECTOR_VALUETYPE;
       nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
    MVT SVT = (MVT::SimpleValueType)nVT;

    if (isTypeLegal(SVT) &&
        SVT.getVectorElementType() == EltVT &&
        SVT.getVectorNumElements() > NElts)
      return SVT;
  }
  return MVT::Other;
}