Special handling for MMX values passed in either a GPR64 or the lower 64 bits of an XMM register.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@50289 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng 2008-04-25 19:11:04 +00:00
parent 23ce502cb7
commit 10e864276b
3 changed files with 57 additions and 2 deletions
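
The change below teaches the x86-64 call lowering that a 64-bit MMX vector argument may be assigned either to an integer argument register (RDI, RSI, RDX, RCX or R8) or to one of XMM0-XMM7. In the GPR case the value is simply bit-cast to i64; in the XMM case it additionally has to end up in the low 64-bit lane of the 128-bit register. A rough value-level illustration (plain standalone C++, not LLVM code; every name in it is invented for the sketch):

#include <cstdint>
#include <cstdio>
#include <cstring>

// GPR case: the <8 x i8> payload is reinterpreted as a single i64
// (the BIT_CONVERT in the lowering) and handed over in RDI/RSI/RDX/RCX/R8.
uint64_t mmxToGPR(const uint8_t bytes[8]) {
  uint64_t gpr;
  std::memcpy(&gpr, bytes, sizeof gpr);
  return gpr;
}

// XMM case: the same 64-bit payload lands in the low lane of the 128-bit
// register; the upper lane carries no defined value (UNDEF in the DAG,
// zeroed here only so the struct can be printed).
struct Xmm { uint64_t lane[2]; };

Xmm mmxToXMM(uint64_t payload) {
  Xmm x = {};
  x.lane[0] = payload;
  return x;
}

int main() {
  const uint8_t v8qi[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  Xmm x = mmxToXMM(mmxToGPR(v8qi));
  std::printf("low lane = %#018llx\n", (unsigned long long)x.lane[0]);
  return 0;
}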


@@ -41,6 +41,9 @@
#include "llvm/ADT/StringExtras.h"
using namespace llvm;
// Forward declarations.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG);
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
@@ -1547,8 +1550,7 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
  SDOperand StackPtr;
  bool containsTailCallByValArg = false;
  SmallVector<std::pair<unsigned, unsigned>, 8> TailCallByValClobberedVRegs;
  SmallVector<MVT::ValueType, 8> TailCallByValClobberedVRegTypes;

  // Walk the register/memloc assignments, inserting copies/loads. For tail
  // calls, remember all arguments for later special lowering.
@@ -1574,6 +1576,30 @@ SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
    }

    if (VA.isRegLoc()) {
      if (Is64Bit) {
        MVT::ValueType RegVT = VA.getLocVT();
        if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 64)
          switch (VA.getLocReg()) {
          default:
            break;
          case X86::RDI: case X86::RSI: case X86::RDX: case X86::RCX:
          case X86::R8: {
            // Special case: passing MMX values in GPR registers.
            Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg);
            break;
          }
          case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
          case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7: {
            // Special case: passing MMX values in XMM registers.
            Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i64, Arg);
            Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Arg);
            Arg = DAG.getNode(ISD::VECTOR_SHUFFLE, MVT::v2i64,
                              DAG.getNode(ISD::UNDEF, MVT::v2i64), Arg,
                              getMOVLMask(2, DAG));
            break;
          }
          }
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      if (!IsTailCall || (IsTailCall && isByVal)) {
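
For the XMM path above, the DAG built is BIT_CONVERT (the 64-bit vector to i64), SCALAR_TO_VECTOR (i64 to v2i64), and a VECTOR_SHUFFLE whose mask comes from getMOVLMask(2, DAG). Assuming getMOVLMask follows the usual MOVL convention and yields the mask <2, 1>, lane 0 of the result is taken from the second operand (the freshly built v2i64) and lane 1 from the first (UNDEF), so the MMX value sits in the low lane with the upper lane undefined; that is the shape the new TableGen pattern in the next file selects to MMX_MOVQ2DQrr. A small standalone model of that shuffle (plain C++ with invented helper names, not the SelectionDAG API):

#include <array>
#include <cstdint>
#include <cstdio>

using V2i64 = std::array<uint64_t, 2>;

// Shuffle indices 0-1 pick from 'a', indices 2-3 pick from 'b'.
V2i64 shuffle2(const V2i64 &a, const V2i64 &b, const std::array<int, 2> &mask) {
  V2i64 r = {};
  for (int i = 0; i < 2; ++i)
    r[i] = (mask[i] < 2) ? a[mask[i]] : b[mask[i] - 2];
  return r;
}

// Equivalent of VECTOR_SHUFFLE(UNDEF, SCALAR_TO_VECTOR(x), <2, 1>): the MMX
// payload ends up in lane 0, lane 1 is whatever the undef operand held.
V2i64 movlInsert(uint64_t x) {
  V2i64 undef = {};               // stand-in for the UNDEF first operand
  V2i64 scalarToVector = {x, 0};  // SCALAR_TO_VECTOR: x placed in lane 0
  return shuffle2(undef, scalarToVector, {2, 1});
}

int main() {
  V2i64 v = movlInsert(0x0807060504030201ULL);
  std::printf("lane0 = %#018llx lane1 = %#018llx\n",
              (unsigned long long)v[0], (unsigned long long)v[1]);
  return 0;
}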


@@ -637,3 +637,7 @@ def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v4i16 immAllOnesV_bc))),
def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8 immAllOnesV_bc))),
                  (load addr:$src2))),
          (MMX_PANDNrm VR64:$src1, addr:$src2)>;

// Move MMX to lower 64-bit of XMM
def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert VR64:$src)))),
          (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;


@@ -0,0 +1,25 @@
; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movq2dq
; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movd | count 1
; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | grep movq | count 4

@g_v8qi = external global <8 x i8>

define void @t1() nounwind {
  %tmp3 = load <8 x i8>* @g_v8qi, align 8
  %tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind
  ret void
}

define void @t2(<8 x i8> %v1, <8 x i8> %v2) nounwind {
  %tmp3 = add <8 x i8> %v1, %v2
  %tmp4 = tail call i32 (...)* @pass_v8qi( <8 x i8> %tmp3 ) nounwind
  ret void
}

define void @t3() nounwind {
  call void @pass_v1di( <1 x i64> zeroinitializer )
  ret void
}

declare i32 @pass_v8qi(...)
declare void @pass_v1di(<1 x i64>)
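
For reference, a rough C++ analogue of @t2 above, showing the kind of source that produces such IR (an assumption about the test's origin, not taken from the commit; only pass_v8qi and the 8 x i8 add come from the test itself):

#include <mmintrin.h>   // requires MMX support when compiling

extern "C" int pass_v8qi(...);   // corresponds to 'declare i32 @pass_v8qi(...)'

extern "C" void t2(__m64 v1, __m64 v2) {
  __m64 sum = _mm_add_pi8(v1, v2);   // the 'add <8 x i8>' in @t2
  pass_v8qi(sum);                    // MMX value passed to a variadic callee
}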