Merging r229413:

------------------------------------------------------------------------
r229413 | atrick | 2015-02-16 13:10:47 -0500 (Mon, 16 Feb 2015) | 16 lines

AArch64: Safely handle the incoming sret call argument.

This adds a safe interface to the machine independent InputArg struct
for accessing the index of the original (IR-level) argument. When a
non-native return type is lowered, we generate the hidden
machine-level sret argument on-the-fly. Before this fix, we were
representing this argument as OrigArgIndex == 0, which is an outright
lie. In particular this crashed in the AArch64 backend where we
actually try to access the type of the original argument.

Now we use a sentinel value for machine arguments that have no
original argument index. AArch64, ARM, Mips, and PPC now check for this
case before accessing the original argument.

Fixes <rdar://19792160> Null pointer assertion in AArch64TargetLowering

------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_36@236856 91177308-0d34-0410-b5e6-96231b3b80d8
Commit 6f6912321a (parent f2a738b21e), committed by Tom Stellard on 2015-05-08 14:13:47 +00:00.
10 changed files with 69 additions and 29 deletions.
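
For illustration, the following is a minimal, self-contained C++ sketch (not part of the commit, and simplified relative to the real ISD::InputArg) of the sentinel-and-accessor pattern described above: implicit machine-level arguments carry NoArgIndex, and callers must check isOrigArg() before calling getOrigArgIndex().

// Standalone sketch only: a simplified stand-in for ISD::InputArg showing the
// NoArgIndex sentinel and the guarded accessor added by this commit.
#include <cassert>
#include <climits>
#include <cstdio>

struct InputArg {
  /// Index of the original (IR-level) argument, or NoArgIndex for arguments
  /// invented during lowering (e.g. the implicit sret pointer).
  unsigned OrigArgIndex;
  static const unsigned NoArgIndex = UINT_MAX;

  bool isOrigArg() const { return OrigArgIndex != NoArgIndex; }

  unsigned getOrigArgIndex() const {
    assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
    return OrigArgIndex;
  }
};

int main() {
  const InputArg Args[] = {
      {2},                    // lowered from IR argument #2
      {InputArg::NoArgIndex}, // implicit machine-level sret argument
  };

  // Backends must test isOrigArg() before touching the original IR argument,
  // instead of trusting a bogus index of 0 as the old code did.
  for (const InputArg &A : Args) {
    if (A.isOrigArg())
      std::printf("maps to IR argument %u\n", A.getOrigArgIndex());
    else
      std::printf("implicit machine-level argument\n");
  }
  return 0;
}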

@@ -134,6 +134,8 @@ namespace ISD {
     /// Index original Function's argument.
     unsigned OrigArgIndex;
+    /// Sentinel value for implicit machine-level input arguments.
+    static const unsigned NoArgIndex = UINT_MAX;
     /// Offset in bytes of current input value relative to the beginning of
     /// original argument. E.g. if argument was splitted into four 32 bit
@@ -147,6 +149,15 @@ namespace ISD {
       VT = vt.getSimpleVT();
       ArgVT = argvt;
     }
+
+    bool isOrigArg() const {
+      return OrigArgIndex != NoArgIndex;
+    }
+
+    unsigned getOrigArgIndex() const {
+      assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
+      return OrigArgIndex;
+    }
   };
   /// OutputArg - This struct carries flags and a value for a

@@ -7638,7 +7638,8 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
     ISD::ArgFlagsTy Flags;
     Flags.setSRet();
     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
-    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 0, 0);
+    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
+                         ISD::InputArg::NoArgIndex, 0);
     Ins.push_back(RetArg);
   }

@@ -2031,18 +2031,19 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
   unsigned CurArgIdx = 0;
   for (unsigned i = 0; i != NumArgs; ++i) {
     MVT ValVT = Ins[i].VT;
-    std::advance(CurOrigArg, Ins[i].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[i].OrigArgIndex;
-
-    // Get type of the original argument.
-    EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
-    MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
-    // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
-    if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
-      ValVT = MVT::i8;
-    else if (ActualMVT == MVT::i16)
-      ValVT = MVT::i16;
+    if (Ins[i].isOrigArg()) {
+      std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[i].getOrigArgIndex();
+      // Get type of the original argument.
+      EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
+      MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
+      // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
+      if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
+        ValVT = MVT::i8;
+      else if (ActualMVT == MVT::i16)
+        ValVT = MVT::i16;
+    }
     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
     bool Res =
         AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);

@@ -3092,8 +3092,11 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
+    if (Ins[VA.getValNo()].isOrigArg()) {
+      std::advance(CurOrigArg,
+                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
+    }
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
       EVT RegVT = VA.getLocVT();
@@ -3173,7 +3176,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
       assert(VA.isMemLoc());
       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
-      int index = ArgLocs[i].getValNo();
+      int index = VA.getValNo();
       // Some Ins[] entries become multiple ArgLoc[] entries.
       // Process them only once.
@@ -3186,6 +3189,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
         // Since they could be overwritten by lowering of arguments in case of
         // a tail call.
         if (Flags.isByVal()) {
+          assert(Ins[index].isOrigArg() &&
+                 "Byval arguments cannot be implicit");
          unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
          ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());

@@ -132,8 +132,8 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
       continue;
     }
-    assert(Ins[i].OrigArgIndex < MF.getFunction()->arg_size());
-    std::advance(FuncArg, Ins[i].OrigArgIndex);
+    assert(Ins[i].getOrigArgIndex() < MF.getFunction()->arg_size());
+    std::advance(FuncArg, Ins[i].getOrigArgIndex());
     OriginalArgWasF128.push_back(
         originalTypeIsF128(FuncArg->getType(), nullptr));

@@ -2933,13 +2933,16 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    std::advance(FuncArg, Ins[i].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[i].OrigArgIndex;
+    if (Ins[i].isOrigArg()) {
+      std::advance(FuncArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[i].getOrigArgIndex();
+    }
     EVT ValVT = VA.getValVT();
     ISD::ArgFlagsTy Flags = Ins[i].Flags;
     bool IsRegLoc = VA.isRegLoc();
     if (Flags.isByVal()) {
+      assert(Ins[i].isOrigArg() && "Byval arguments cannot be implicit");
       unsigned FirstByValReg, LastByValReg;
       unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
       CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);

@@ -2688,9 +2688,10 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
     unsigned ObjSize = ObjectVT.getStoreSize();
     unsigned ArgSize = ObjSize;
     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
-    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[ArgNo].OrigArgIndex;
-
+    if (Ins[ArgNo].isOrigArg()) {
+      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
+    }
     /* Respect alignment of argument on the stack. */
     unsigned Align =
       CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
@@ -2704,6 +2705,8 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
     // FIXME the codegen can be much improved in some cases.
     // We do not have to keep everything in memory.
     if (Flags.isByVal()) {
+      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
+
       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
       ObjSize = Flags.getByValSize();
       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
@@ -3064,9 +3067,10 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
     unsigned ArgSize = ObjSize;
     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
-    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[ArgNo].OrigArgIndex;
-
+    if (Ins[ArgNo].isOrigArg()) {
+      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
+    }
     unsigned CurArgOffset = ArgOffset;
     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
@@ -3087,6 +3091,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
     // FIXME the codegen can be much improved in some cases.
     // We do not have to keep everything in memory.
     if (Flags.isByVal()) {
+      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
+
       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
       ObjSize = Flags.getByValSize();
       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

@@ -1698,7 +1698,7 @@ SDValue R600TargetLowering::LowerFormalArguments(
     // XXX - I think PartOffset should give you this, but it seems to give the
     // size of the register which isn't useful.
-    unsigned ValBase = ArgLocs[In.OrigArgIndex].getLocMemOffset();
+    unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
     unsigned PartOffset = VA.getLocMemOffset();
     unsigned Offset = 36 + VA.getLocMemOffset();

@@ -450,7 +450,7 @@ SDValue SITargetLowering::LowerFormalArguments(
       // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
       // three or five element vertex only needs three or five registers,
       // NOT four or eigth.
-      Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
+      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
       unsigned NumElements = ParamType->getVectorNumElements();
       for (unsigned j = 0; j != NumElements; ++j) {
@@ -533,7 +533,7 @@ SDValue SITargetLowering::LowerFormalArguments(
                                    Offset, Ins[i].Flags.isSExt());
       const PointerType *ParamTy =
-        dyn_cast<PointerType>(FType->getParamType(Ins[i].OrigArgIndex));
+        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
           ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
         // On SI local pointers are just offsets into LDS, so they are always
@@ -568,7 +568,7 @@ SDValue SITargetLowering::LowerFormalArguments(
     if (Arg.VT.isVector()) {
       // Build a vector from the registers
-      Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
+      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
       unsigned NumElements = ParamType->getVectorNumElements();
       SmallVector<SDValue, 4> Regs;

@@ -0,0 +1,13 @@
+; RUN: llc %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
+;
+; Handle implicit sret arguments that are generated on-the-fly during lowering.
+; <rdar://19792160> Null pointer assertion in AArch64TargetLowering
+; CHECK-LABEL: big_retval
+; ... str or stp for the first 1024 bits
+; CHECK: strb wzr, [x8, #128]
+; CHECK: ret
+define i1032 @big_retval() {
+entry:
+  ret i1032 0
+}