Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-02-03 15:36:21 +00:00)
Revert 131467 due to buildbot complaint.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@131469 91177308-0d34-0410-b5e6-96231b3b80d8
commit 6db2c2fe21
parent 504421e327

@@ -48,12 +48,8 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
   if (MinAlign > (int)Align)
     Align = MinAlign;
   TM.getTargetLowering()->HandleByVal(const_cast<CCState*>(this), Size);
-  if (Size != 0) {
-    unsigned Offset = AllocateStack(Size, Align);
-    addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
-  } else {
-    addLoc(CCValAssign::getReg(ValNo, ValVT, getFirstByValReg(), LocVT, LocInfo));
-  }
+  unsigned Offset = AllocateStack(Size, Align);
+  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
 }
 
 /// MarkAllocated - Mark a register and all of its aliases as allocated.
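Note: the hunk above is the target-independent CCState::HandleByVal. With the revert, a byval argument always receives a stack slot again; the getFirstByValReg() register path is gone. Below is a minimal standalone sketch (plain integers, not the LLVM API) of the arithmetic AllocateStack performs for that slot: round the running offset up to the requested alignment, then reserve the argument's size.

```cpp
#include <cassert>
#include <cstdio>

// Standalone model of the restored behaviour: every byval argument gets a
// stack slot.  AllocateStack here mimics what CCState::AllocateStack does:
// align the running stack offset up, then bump it by Size.
struct StackAllocModel {
  unsigned NextOffset = 0;

  unsigned AllocateStack(unsigned Size, unsigned Align) {
    assert(Align && (Align & (Align - 1)) == 0 && "alignment must be a power of two");
    NextOffset = (NextOffset + Align - 1) & ~(Align - 1); // align up
    unsigned Offset = NextOffset;                         // slot for this byval
    NextOffset += Size;                                   // reserve its bytes
    return Offset;
  }
};

int main() {
  StackAllocModel CC;
  // A 12-byte byval struct aligned to 4, then a 24-byte one aligned to 8.
  std::printf("first  byval at offset %u\n", CC.AllocateStack(12, 4)); // 0
  std::printf("second byval at offset %u\n", CC.AllocateStack(24, 8)); // 16
  return 0;
}
```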
@@ -2091,36 +2091,21 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
     }
 
     if (VA.isRegLoc()) {
-      if (isByVal && (!IsSibcall && !isTailCall)) {
-        // 64-bit only. x86_32 passes everything on the stack.
-        assert(CCInfo.isFirstByValRegValid() && "isByVal, but no valid register assigned!");
-        EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
-        unsigned int i, j;
-        for (i = 0, j = CCInfo.getFirstByValReg(); j <= X86::R9; i++, j++) {
-          SDValue Const = DAG.getConstant(8*i, MVT::i64);
-          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
-          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
-                                     MachinePointerInfo(),
-                                     false, false, 0);
-          MemOpChains.push_back(Load.getValue(1));
-          RegsToPass.push_back(std::make_pair(j, Load));
-        }
-        CCInfo.clearFirstByValReg();
-      } else {
+      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+      if (isVarArg && IsWin64) {
+        // Win64 ABI requires argument XMM reg to be copied to the corresponding
+        // shadow reg if callee is a varargs function.
+        unsigned ShadowReg = 0;
+        switch (VA.getLocReg()) {
+        case X86::XMM0: ShadowReg = X86::RCX; break;
+        case X86::XMM1: ShadowReg = X86::RDX; break;
+        case X86::XMM2: ShadowReg = X86::R8; break;
+        case X86::XMM3: ShadowReg = X86::R9; break;
+        }
+        if (ShadowReg)
+          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
+      } else { // Usual case: not byval.
         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
-        if (isVarArg && IsWin64) {
-          // Win64 ABI requires argument XMM reg to be copied to the corresponding
-          // shadow reg if callee is a varargs function.
-          unsigned ShadowReg = 0;
-          switch (VA.getLocReg()) {
-          case X86::XMM0: ShadowReg = X86::RCX; break;
-          case X86::XMM1: ShadowReg = X86::RDX; break;
-          case X86::XMM2: ShadowReg = X86::R8; break;
-          case X86::XMM3: ShadowReg = X86::R9; break;
-          }
-          if (ShadowReg)
-            RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
-        }
       }
     } else if (!IsSibcall && (!isTailCall || isByVal)) {
       assert(VA.isMemLoc());
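Note: the restored LowerCall code keeps the Win64 varargs rule spelled out in the comment above: an argument passed in XMM0..XMM3 must also be copied into the matching integer shadow register (RCX, RDX, R8, R9) when the callee is varargs. The sketch below is illustrative only; it uses plain strings rather than LLVM register numbers and simply restates that mapping.

```cpp
#include <cstdio>

// Illustrative only: the Win64 varargs shadowing rule from the restored code.
// Argument slot i of a varargs call has an XMM register and a matching integer
// "shadow" register, and a floating-point argument must be available in both.
struct ArgRegs { const char *Xmm; const char *Shadow; };

static ArgRegs win64ShadowFor(unsigned Slot) {
  static const ArgRegs Map[4] = {
    {"xmm0", "rcx"}, {"xmm1", "rdx"}, {"xmm2", "r8"}, {"xmm3", "r9"},
  };
  return Slot < 4 ? Map[Slot] : ArgRegs{"", ""}; // slots past 3 go on the stack
}

int main() {
  // A double passed as the second argument (slot 1) of a Win64 varargs call:
  ArgRegs R = win64ShadowFor(1);
  std::printf("value in %s, shadowed in %s\n", R.Xmm, R.Shadow);
  return 0;
}
```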
@@ -2455,47 +2440,6 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
   return Offset;
 }
 
-/// HandleByVal - Every parameter *after* a byval parameter is passed
-/// on the stack. Remember the next parameter register to allocate,
-/// and then confiscate the rest of the parameter registers to insure
-/// this.
-void
-X86TargetLowering::HandleByVal(CCState *State, unsigned &size) const {
-  // X86_32 passes all parameters on the stack, byval or whatever.
-  // X86_64 does not split parameters between registers and memory; if
-  // the parameter does not fit entirely inside the remaining
-  // parameter registers, it goes on the stack.
-  static const unsigned RegList2[] = {
-    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
-  };
-
-  if (!Subtarget->is64Bit())
-    return;
-
-  if (size > 16) // X86_64 aggregates > 16 bytes are passed in memory.
-    return;
-
-  unsigned reg = State->getFirstUnallocated(RegList2, 6);
-
-  if (reg == 6) // Out of regs to allocate.
-    return;
-
-  // We expect the size to be 32 bits, or some non-zero multiple of 64 bits.
-  unsigned nregs = size / 8;
-  if (nregs == 0) nregs=1; // 32-bit case.
-
-  unsigned regs_available = 6 - reg;
-
-  if (nregs <= regs_available) {
-    size = 0;
-    State->setFirstByValReg(RegList2[reg]);
-    while (nregs--) {
-      State->AllocateReg(RegList2[reg]);
-      reg++;
-    }
-  }
-}
-
 /// MatchingStackOffset - Return true if the given stack call argument is
 /// already available in the same position (relatively) of the caller's
 /// incoming argument stack.
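Note: the removed X86TargetLowering::HandleByVal implemented the policy its comment describes: on x86-64, a byval aggregate of at most 16 bytes claims size/8 of the remaining RDI, RSI, RDX, RCX, R8 and R9 registers (a 32-bit aggregate counts as one), and on success the size is zeroed so no stack slot is created. Below is a standalone sketch of that decision, independent of the CCState API.

```cpp
#include <cstdio>

// Standalone model of the policy in the removed X86TargetLowering::HandleByVal:
// a byval aggregate of <= 16 bytes may be passed in the remaining integer
// argument registers; otherwise (or if registers run out) it stays in memory.
// firstFree plays the role of CCState::getFirstUnallocated.
static const char *const IntArgRegs[6] = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};

// Returns the number of registers claimed (0 means the aggregate goes on the
// stack) and, like the original, zeroes *size on success.
static unsigned regsForByVal(unsigned *size, unsigned firstFree) {
  if (*size > 16)              // aggregates > 16 bytes are passed in memory
    return 0;
  if (firstFree >= 6)          // out of integer argument registers
    return 0;
  unsigned nregs = *size / 8;
  if (nregs == 0) nregs = 1;   // 32-bit case: still needs one register
  if (nregs > 6 - firstFree)   // does not fit in what is left
    return 0;
  *size = 0;                   // fits entirely: no stack slot needed
  return nregs;
}

int main() {
  unsigned size = 16, firstFree = 2; // two integer argument registers already used
  unsigned n = regsForByVal(&size, firstFree);
  std::printf("claims %u reg(s) starting at %s, remaining size %u\n",
              n, n ? IntArgRegs[firstFree] : "(none)", size);
  return 0;
}
```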
@@ -2660,7 +2604,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
     }
 
     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
-    if (ArgLocs.size()) {
+    if (CCInfo.getNextStackOffset()) {
       MachineFunction &MF = DAG.getMachineFunction();
       if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
         return false;
@@ -2677,8 +2621,6 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
       ISD::ArgFlagsTy Flags = Outs[i].Flags;
       if (VA.getLocInfo() == CCValAssign::Indirect)
         return false;
-      if (Flags.isByVal())
-        return false;
       if (!VA.isRegLoc()) {
         if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                  MFI, MRI, TII))
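Note: the revert switches IsEligibleForTailCallOptimization back to testing CCInfo.getNextStackOffset() rather than ArgLocs.size(): ArgLocs gets one entry per argument even when every argument is assigned to a register, while getNextStackOffset() is nonzero only when some argument actually needs stack space. A toy model of that distinction (not the LLVM API):

```cpp
#include <cstdio>
#include <vector>

// Toy model: every argument gets a "location" (so Locs.size() counts all
// arguments), but the running stack offset only grows for arguments that do
// not fit in registers.
struct ToyCCState {
  std::vector<bool> Locs;       // one entry per analyzed argument
  unsigned NextStackOffset = 0; // bytes of stack needed so far

  void analyzeArg(bool FitsInReg, unsigned Size) {
    Locs.push_back(FitsInReg);
    if (!FitsInReg)
      NextStackOffset += Size;
  }
};

int main() {
  ToyCCState CC;
  // Three register-sized integer arguments: no stack is needed, yet
  // Locs.size() is 3, so a check on Locs.size() would take the
  // stack-argument path even though nothing is on the stack.
  for (int i = 0; i < 3; ++i)
    CC.analyzeArg(/*FitsInReg=*/true, /*Size=*/8);
  std::printf("locations = %zu, stack bytes = %u\n", CC.Locs.size(), CC.NextStackOffset);
  return 0;
}
```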
@@ -11121,6 +11063,13 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
+/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
+/// generation and convert it from being a bunch of shuffles and extracts
+/// to a simple store and scalar loads to extract the elements.
+static SDValue PerformVectorZeroExtendCombine(SDNode *N, SelectionDAG &DAG) {
+  return SDValue();
+}
+
 /// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget *Subtarget) {
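Note: the re-added PerformVectorZeroExtendCombine is a stub; returning SDValue() tells the DAG combiner that no replacement node was produced. A toy illustration of that convention, with an empty std::optional standing in for a null SDValue:

```cpp
#include <cstdio>
#include <optional>

// Toy illustration (C++17): a combine hook either produces a replacement value
// or reports "no change".  In LLVM the "no change" case is a default-constructed
// (null) SDValue; here it is an empty std::optional.
static std::optional<int> combineZeroExtend(int node) {
  (void)node;          // the stub inspects nothing
  return std::nullopt; // like "return SDValue();": nothing was combined
}

int main() {
  if (auto replacement = combineZeroExtend(42))
    std::printf("replaced with %d\n", *replacement);
  else
    std::printf("no combine performed\n");
  return 0;
}
```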
@@ -12181,6 +12130,9 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
   case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG);
   case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG);
+  case X86ISD::PMOVZXBW:
+  case X86ISD::PMOVZXWD:
+  case X86ISD::PMOVZXDQ:    return PerformVectorZeroExtendCombine(N, DAG);
   case X86ISD::SHUFPS:      // Handle all target specific shuffles
   case X86ISD::SHUFPD:
   case X86ISD::PALIGN:
@@ -263,6 +263,10 @@ namespace llvm {
       PUNPCKHDQ,
       PUNPCKHQDQ,
 
+      PMOVZXBW,
+      PMOVZXWD,
+      PMOVZXDQ,
+
       // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack,
       // according to %al. An operator is needed so that this can be expanded
       // with control flow.
@@ -728,8 +732,6 @@ namespace llvm {
 
     // Call lowering helpers.
 
-    void HandleByVal(CCState *, unsigned &) const;
-
    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.