CodeGen: Handle va_start in the entry block
Also fix a small copy-paste bug in X86ISelLowering where Chain should have been used in place of DAG.getEntryNode(). Fixes PR20828.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216929 91177308-0d34-0410-b5e6-96231b3b80d8
parent 1c7650f67c
commit f93099eb1c
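The X86ISelLowering hunk below swaps DAG.getEntryNode() for the incoming Chain when copying the vararg argument registers out of their physical registers. A minimal sketch of that chaining rule follows; it is an illustration against the LLVM headers, not code from the commit, and the helper name copyLiveInGPR and its parameters are made up for the example.

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

// Copy one live-in GPR into a virtual register, threading the copy onto the
// token chain that LowerFormalArguments is already building. Re-anchoring the
// copy at DAG.getEntryNode() instead of Chain was the copy-paste bug fixed here.
static SDValue copyLiveInGPR(SelectionDAG &DAG, MachineFunction &MF,
                             SDValue Chain, SDLoc dl, unsigned PhysReg,
                             const TargetRegisterClass *RC) {
  unsigned VReg = MF.addLiveIn(PhysReg, RC);             // mark the register live-in
  return DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);  // chain off Chain, not the entry node
}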
@@ -75,34 +75,26 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
   // instruction values that are used outside of the block that defines
   // them.
   Function::const_iterator BB = Fn->begin(), EB = Fn->end();
-  for (BasicBlock::const_iterator I = BB->begin(), E = BB->end(); I != E; ++I)
-    if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
-      // Don't fold inalloca allocas or other dynamic allocas into the initial
-      // stack frame allocation, even if they are in the entry block.
-      if (!AI->isStaticAlloca())
-        continue;
-
-      if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
-        Type *Ty = AI->getAllocatedType();
-        uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
-        unsigned Align =
-            std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
-                     AI->getAlignment());
-
-        TySize *= CUI->getZExtValue();   // Get total allocated size.
-        if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
-
-        StaticAllocaMap[AI] =
-          MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
-      }
-    }
-
   for (; BB != EB; ++BB)
     for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
          I != E; ++I) {
-      // Look for dynamic allocas.
       if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
-        if (!AI->isStaticAlloca()) {
+        // Static allocas can be folded into the initial stack frame adjustment.
+        if (AI->isStaticAlloca()) {
+          const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
+          Type *Ty = AI->getAllocatedType();
+          uint64_t TySize = TLI->getDataLayout()->getTypeAllocSize(Ty);
+          unsigned Align =
+              std::max((unsigned)TLI->getDataLayout()->getPrefTypeAlignment(Ty),
+                       AI->getAlignment());
+
+          TySize *= CUI->getZExtValue();   // Get total allocated size.
+          if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
+
+          StaticAllocaMap[AI] =
+              MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
+
+        } else {
           unsigned Align = std::max(
               (unsigned)TLI->getDataLayout()->getPrefTypeAlignment(
                   AI->getAllocatedType()),
@@ -2547,11 +2547,11 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
     for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
       unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
       LiveGPRs.push_back(
-          DAG.getCopyFromReg(DAG.getEntryNode(), dl, GPR, MVT::i64));
+          DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
     }
     if (!ArgXMMs.empty()) {
       unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
-      ALVal = DAG.getCopyFromReg(DAG.getEntryNode(), dl, AL, MVT::i8);
+      ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
       for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
         unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
         LiveXMMRegs.push_back(
@@ -111,3 +111,22 @@ entry:
   %tmp = va_arg i8** %ap, i32
   ret i32 %tmp
 }
+
+define void @sret_arg(i32* sret %agg.result, i8* nocapture readnone %format, ...) {
+entry:
+  %ap = alloca i8*
+  %ap_i8 = bitcast i8** %ap to i8*
+  call void @llvm.va_start(i8* %ap_i8)
+  %tmp = va_arg i8** %ap, i32
+  store i32 %tmp, i32* %agg.result
+  ret void
+}
+; CHECK-LABEL: sret_arg:
+; CHECK: pushq
+; CHECK-DAG: movq %r9, 40(%rsp)
+; CHECK-DAG: movq %r8, 32(%rsp)
+; CHECK: movl 32(%rsp), %[[tmp:[^ ]*]]
+; CHECK: movl %[[tmp]], (%[[sret:[^ ]*]])
+; CHECK: movq %[[sret]], %rax
+; CHECK: popq
+; CHECK: retq
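For reference, a C++-level analogue of the new sret_arg test: an assumption about what source pattern the IR corresponds to, not code taken from the commit. A variadic function whose aggregate return value travels through a hidden sret pointer, with va_start in the entry block, is the combination the new test exercises.

#include <cstdarg>
#include <cstdio>

struct Aggregate { int first; int pad[4]; };  // large enough to be returned via a hidden sret pointer

Aggregate sret_arg(const char *format, ...) {
  va_list ap;
  va_start(ap, format);            // va_start in the entry block
  Aggregate result = {};
  result.first = va_arg(ap, int);  // mirrors "%tmp = va_arg i8** %ap, i32"
  va_end(ap);
  return result;                   // written back through the sret pointer
}

int main() {
  Aggregate r = sret_arg("unused", 42);
  std::printf("%d\n", r.first);    // prints 42
  return 0;
}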