Make constant arrays that are passed to functions const.

In theory this allows the compiler to skip materializing the array on
the stack. In practice clang often fails to do that, but that's a
different story. NFC.
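
For illustration, a minimal standalone sketch of the pattern (the helper and
the names here are hypothetical, not taken from this patch):

    // Stand-in for the ArrayRef-style consumers touched by this change.
    static int sum(const int *Vals, unsigned N) {
      int S = 0;
      for (unsigned I = 0; I != N; ++I)
        S += Vals[I];
      return S;
    }

    int before() {
      int Vals[] = {1, 2, 3};       // mutable: a fresh copy may be built on the stack
      return sum(Vals, 3);
    }

    int after() {
      const int Vals[] = {1, 2, 3}; // const: the compiler may share one copy in .rodata
      return sum(Vals, 3);
    }

Whether the stack copy is actually elided is up to the optimizer, as noted
above.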

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@231571 91177308-0d34-0410-b5e6-96231b3b80d8
Benjamin Kramer 2015-03-07 17:41:00 +00:00
parent 62ffaaac7c
commit ed0266d8ee
8 changed files with 30 additions and 40 deletions


@@ -1936,7 +1936,7 @@ SDNode *SelectionDAGISel::Select_INLINEASM(SDNode *N) {
   std::vector<SDValue> Ops(N->op_begin(), N->op_end());
   SelectInlineAsmMemoryOperands(Ops);
-  EVT VTs[] = { MVT::Other, MVT::Glue };
+  const EVT VTs[] = {MVT::Other, MVT::Glue};
   SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N), VTs, Ops);
   New->setNodeId(-1);
   return New.getNode();


@@ -122,12 +122,11 @@ void MCELFStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
 // If neither T1 < T2 nor T2 < T1 according to this ordering, use T2 (the user
 // provided type).
 static unsigned CombineSymbolTypes(unsigned T1, unsigned T2) {
-  unsigned TypeOrdering[] = {ELF::STT_NOTYPE, ELF::STT_OBJECT, ELF::STT_FUNC,
-                             ELF::STT_GNU_IFUNC, ELF::STT_TLS};
-  for (unsigned i = 0; i != array_lengthof(TypeOrdering); ++i) {
-    if (T1 == TypeOrdering[i])
+  for (unsigned Type : {ELF::STT_NOTYPE, ELF::STT_OBJECT, ELF::STT_FUNC,
+                        ELF::STT_GNU_IFUNC, ELF::STT_TLS}) {
+    if (T1 == Type)
       return T2;
-    if (T2 == TypeOrdering[i])
+    if (T2 == Type)
       return T1;
   }


@@ -1055,7 +1055,7 @@ SDNode *AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs,
   SDValue Ops[] = {N->getOperand(2), // Mem operand;
                    Chain};
-  EVT ResTys[] = {MVT::Untyped, MVT::Other};
+  const EVT ResTys[] = {MVT::Untyped, MVT::Other};
   SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
   SDValue SuperReg = SDValue(Ld, 0);
@@ -1077,8 +1077,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                    N->getOperand(2), // Incremental
                    Chain};
-  EVT ResTys[] = {MVT::i64, // Type of the write back register
-                  MVT::Untyped, MVT::Other};
+  const EVT ResTys[] = {MVT::i64, // Type of the write back register
+                        MVT::Untyped, MVT::Other};
   SDNode *Ld = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
@@ -1119,8 +1119,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {
   SDLoc dl(N);
   EVT VT = N->getOperand(2)->getValueType(0);
-  EVT ResTys[] = {MVT::i64, // Type of the write back register
-                  MVT::Other}; // Type for the Chain
+  const EVT ResTys[] = {MVT::i64, // Type of the write back register
+                        MVT::Other}; // Type for the Chain
   // Form a REG_SEQUENCE to force register allocation.
   bool Is128Bit = VT.getSizeInBits() == 128;
@@ -1184,7 +1184,7 @@ SDNode *AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
   SDValue RegSeq = createQTuple(Regs);
-  EVT ResTys[] = {MVT::Untyped, MVT::Other};
+  const EVT ResTys[] = {MVT::Untyped, MVT::Other};
   unsigned LaneNo =
       cast<ConstantSDNode>(N->getOperand(NumVecs + 2))->getZExtValue();
@@ -1224,8 +1224,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
   SDValue RegSeq = createQTuple(Regs);
-  EVT ResTys[] = {MVT::i64, // Type of the write back register
-                  MVT::Untyped, MVT::Other};
+  const EVT ResTys[] = {MVT::i64, // Type of the write back register
+                        MVT::Untyped, MVT::Other};
   unsigned LaneNo =
       cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();
@@ -1309,8 +1309,8 @@ SDNode *AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
   SDValue RegSeq = createQTuple(Regs);
-  EVT ResTys[] = {MVT::i64, // Type of the write back register
-                  MVT::Other};
+  const EVT ResTys[] = {MVT::i64, // Type of the write back register
+                        MVT::Other};
   unsigned LaneNo =
       cast<ConstantSDNode>(N->getOperand(NumVecs + 1))->getZExtValue();


@@ -3086,7 +3086,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
     // Store exclusive double return a i32 value which is the return status
     // of the issued store.
-    EVT ResTys[] = { MVT::i32, MVT::Other };
+    const EVT ResTys[] = {MVT::i32, MVT::Other};
     bool isThumb = Subtarget->isThumb() && Subtarget->hasThumb2();
     // Place arguments in the right order.


@@ -569,14 +569,12 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
     setTargetDAGCombine(ISD::LOAD);
     // It is legal to extload from v4i8 to v4i16 or v4i32.
-    MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
-                  MVT::v4i16, MVT::v2i16,
-                  MVT::v2i32};
-    for (unsigned i = 0; i < 6; ++i) {
+    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
+                   MVT::v2i32}) {
       for (MVT VT : MVT::integer_vector_valuetypes()) {
-        setLoadExtAction(ISD::EXTLOAD, VT, Tys[i], Legal);
-        setLoadExtAction(ISD::ZEXTLOAD, VT, Tys[i], Legal);
-        setLoadExtAction(ISD::SEXTLOAD, VT, Tys[i], Legal);
+        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
+        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
+        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
       }
     }
   }


@ -160,11 +160,8 @@ PPCRegisterInfo::getNoPreservedMask() const {
} }
void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const { void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
unsigned PseudoRegs[] = { PPC::ZERO, PPC::ZERO8, PPC::RM }; for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
for (unsigned i = 0, ie = array_lengthof(PseudoRegs); i != ie; ++i) { Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
unsigned Reg = PseudoRegs[i];
Mask[Reg / 32] &= ~(1u << (Reg % 32));
}
} }
BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const { BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {


@@ -172,16 +172,12 @@ SITargetLowering::SITargetLowering(TargetMachine &TM,
   setOperationAction(ISD::UDIV, MVT::i64, Expand);
   setOperationAction(ISD::UREM, MVT::i64, Expand);
-  // We only support LOAD/STORE and vector manipulation ops for vectors
-  // with > 4 elements.
-  MVT VecTypes[] = {
-    MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32
-  };
   setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
   setOperationAction(ISD::SELECT, MVT::i1, Promote);
-  for (MVT VT : VecTypes) {
+  // We only support LOAD/STORE and vector manipulation ops for vectors
+  // with > 4 elements.
+  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32}) {
     for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
       switch(Op) {
       case ISD::LOAD:


@@ -62,8 +62,8 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
 #ifndef NDEBUG
   // If the base register might conflict with our physical registers, bail out.
-  unsigned ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
-                           X86::ECX, X86::EAX, X86::EDI};
+  const unsigned ClobberSet[] = {X86::RCX, X86::RAX, X86::RDI,
+                                 X86::ECX, X86::EAX, X86::EDI};
   assert(!isBaseRegConflictPossible(DAG, ClobberSet));
 #endif
@@ -228,8 +228,8 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy(
     return SDValue();
   // If the base register might conflict with our physical registers, bail out.
-  unsigned ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
-                           X86::ECX, X86::ESI, X86::EDI};
+  const unsigned ClobberSet[] = {X86::RCX, X86::RSI, X86::RDI,
+                                 X86::ECX, X86::ESI, X86::EDI};
   if (isBaseRegConflictPossible(DAG, ClobberSet))
     return SDValue();