X86: Canonicalize access to function attributes, NFC

Canonicalize access to function attributes to use the simpler API.

getAttributes().getAttribute(AttributeSet::FunctionIndex, Kind)
  => getFnAttribute(Kind)

getAttributes().hasAttribute(AttributeSet::FunctionIndex, Kind)
  => hasFnAttribute(Kind)
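
For illustration, a minimal sketch of why this is NFC (variable names
are ours; both spellings are real APIs at this revision, and
hasFnAttribute performs the same FunctionIndex lookup internally):

  // Old spelling: query the FunctionIndex slot of the AttributeSet.
  bool OldWay = F->getAttributes().hasAttribute(
      AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  // New spelling: the Function-level convenience wrapper.
  bool NewWay = F->hasFnAttribute(Attribute::OptimizeForSize);

  assert(OldWay == NewWay && "canonicalization must not change behavior");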

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@229214 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Duncan P. N. Exon Smith
Date:   2015-02-14 01:59:52 +00:00
Parent: 0f7084338a
Commit: 894c8c514a
8 changed files with 33 additions and 58 deletions

lib/Target/X86/X86CallFrameOptimization.cpp

@@ -163,11 +163,9 @@ bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
     return true;
 
   // Don't do this when not optimizing for size.
-  AttributeSet FnAttrs = MF.getFunction()->getAttributes();
   bool OptForSize =
-      FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
-                           Attribute::OptimizeForSize) ||
-      FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+      MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
+      MF.getFunction()->hasFnAttribute(Attribute::MinSize);
 
   if (!OptForSize)
     return false;

lib/Target/X86/X86FrameLowering.cpp

@@ -654,14 +654,13 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
   // pointer, calls, or dynamic alloca then we do not need to adjust the
   // stack pointer (we fit in the Red Zone). We also check that we don't
   // push and pop from the stack.
-  if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
-                                                   Attribute::NoRedZone) &&
+  if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
       !RegInfo->needsStackRealignment(MF) &&
-      !MFI->hasVarSizedObjects() &&                     // No dynamic alloca.
-      !MFI->adjustsStack() &&                           // No calls.
-      !IsWin64 &&                                       // Win64 has no Red Zone
-      !usesTheStack(MF) &&                              // Don't push and pop.
-      !MF.shouldSplitStack()) {                         // Regular stack
+      !MFI->hasVarSizedObjects() &&                    // No dynamic alloca.
+      !MFI->adjustsStack() &&                          // No calls.
+      !IsWin64 &&                                      // Win64 has no Red Zone
+      !usesTheStack(MF) &&                             // Don't push and pop.
+      !MF.shouldSplitStack()) {                        // Regular stack
     uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
     if (HasFP) MinSize += SlotSize;
     StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);

lib/Target/X86/X86ISelDAGToDAG.cpp

@@ -451,8 +451,7 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
 void X86DAGToDAGISel::PreprocessISelDAG() {
   // OptForSize is used in pattern predicates that isel is matching.
-  OptForSize = MF->getFunction()->getAttributes().
-    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+  OptForSize = MF->getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
 
   for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
        E = CurDAG->allnodes_end(); I != E; ) {

lib/Target/X86/X86ISelLowering.cpp

@@ -1845,8 +1845,7 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
                                        MachineFunction &MF) const {
   const Function *F = MF.getFunction();
   if ((!IsMemset || ZeroMemset) &&
-      !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
-                                       Attribute::NoImplicitFloat)) {
+      !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
     if (Size >= 16 &&
         (Subtarget->isUnalignedMemAccessFast() ||
          ((DstAlign == 0 || DstAlign >= 16) &&
@@ -2402,8 +2401,7 @@ static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
   }
 
   const Function *Fn = MF.getFunction();
-  bool NoImplicitFloatOps = Fn->getAttributes().
-    hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
+  bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
   assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
          "SSE register cannot be used when SSE is disabled!");
   if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
@@ -2571,8 +2569,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
     // Figure out if XMM registers are in use.
     assert(!(MF.getTarget().Options.UseSoftFloat &&
-             Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
-                                              Attribute::NoImplicitFloat)) &&
+             Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
            "SSE register cannot be used when SSE is disabled!");
 
     // 64-bit calling conventions support varargs and register parameters, so we
@@ -3132,11 +3129,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       // unless we're building with the leopard linker or later, which
       // automatically synthesizes these stubs.
       OpFlags = X86II::MO_DARWIN_STUB;
-    } else if (Subtarget->isPICStyleRIPRel() &&
-               isa<Function>(GV) &&
-               cast<Function>(GV)->getAttributes().
-                 hasAttribute(AttributeSet::FunctionIndex,
-                              Attribute::NonLazyBind)) {
+    } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
+               cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
       // If the function is marked as non-lazy, generate an indirect call
       // which loads from the GOT directly. This avoids runtime overhead
       // at the cost of eager binding (and one extra byte of encoding).
@@ -6226,8 +6220,7 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
   // it may be detrimental to overall size. There needs to be a way to detect
   // that condition to know if this is truly a size win.
   const Function *F = DAG.getMachineFunction().getFunction();
-  bool OptForSize = F->getAttributes().
-    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+  bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
 
   // Handle broadcasting a single constant scalar from the constant pool
   // into a vector.
@@ -12532,8 +12525,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
   bool HasFp256 = Subtarget->hasFp256();
   bool HasInt256 = Subtarget->hasInt256();
   MachineFunction &MF = DAG.getMachineFunction();
-  bool OptForSize = MF.getFunction()->getAttributes().
-    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+  bool OptForSize =
+      MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
 
   // Check if we should use the experimental vector shuffle lowering. If so,
   // delegate completely to that code path.
@@ -15299,8 +15292,8 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
   // if we're optimizing for size, however, as that'll allow better folding
   // of memory operations.
   if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
-      !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
-          AttributeSet::FunctionIndex, Attribute::MinSize) &&
+      !DAG.getMachineFunction().getFunction()->hasFnAttribute(
+          Attribute::MinSize) &&
       !Subtarget->isAtom()) {
     unsigned ExtendOp =
         isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
@@ -17002,10 +16995,8 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
   if (ArgMode == 2) {
     // Sanity Check: Make sure using fp_offset makes sense.
     assert(!DAG.getTarget().Options.UseSoftFloat &&
-           !(DAG.getMachineFunction()
-             .getFunction()->getAttributes()
-             .hasAttribute(AttributeSet::FunctionIndex,
-                           Attribute::NoImplicitFloat)) &&
+           !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
+               Attribute::NoImplicitFloat)) &&
            Subtarget->hasSSE1());
   }
@@ -24764,8 +24755,8 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
   // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
   MachineFunction &MF = DAG.getMachineFunction();
-  bool OptForSize = MF.getFunction()->getAttributes().
-    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
+  bool OptForSize =
+      MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
 
   // SHLD/SHRD instructions have lower register pressure, but on some
   // platforms they have higher latency than the equivalent
@@ -25212,8 +25203,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     return SDValue();
 
   const Function *F = DAG.getMachineFunction().getFunction();
-  bool NoImplicitFloatOps = F->getAttributes().
-    hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
+  bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
   bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
                     && Subtarget->hasSSE2();
   if ((VT.isVector() ||

lib/Target/X86/X86InstrInfo.cpp

@@ -4987,8 +4987,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
-  if (!MF.getFunction()->getAttributes().
-        hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
+  if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
       hasPartialRegUpdate(MI->getOpcode()))
     return nullptr;
@@ -5063,8 +5062,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
   // Unless optimizing for size, don't fold to avoid partial
   // register update stalls
-  if (!MF.getFunction()->getAttributes().
-        hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) &&
+  if (!MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) &&
      hasPartialRegUpdate(MI->getOpcode()))
    return nullptr;

lib/Target/X86/X86PadShortFunction.cpp

@@ -93,11 +93,8 @@ FunctionPass *llvm::createX86PadShortFunctions() {
 /// runOnMachineFunction - Loop over all of the basic blocks, inserting
 /// NOOP instructions before early exits.
 bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) {
-  const AttributeSet &FnAttrs = MF.getFunction()->getAttributes();
-  if (FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
-                           Attribute::OptimizeForSize) ||
-      FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
-                           Attribute::MinSize)) {
+  if (MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
+      MF.getFunction()->hasFnAttribute(Attribute::MinSize)) {
     return false;
   }

lib/Target/X86/X86RegisterInfo.cpp

@@ -445,10 +445,8 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
   const Function *F = MF.getFunction();
   unsigned StackAlign =
       MF.getSubtarget().getFrameLowering()->getStackAlignment();
-  bool requiresRealignment =
-      ((MFI->getMaxAlignment() > StackAlign) ||
-       F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
-                                       Attribute::StackAlignment));
+  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
+                              F->hasFnAttribute(Attribute::StackAlignment));
 
   // If we've requested that we force align the stack do so now.
   if (ForceStackAlign)

lib/Target/X86/X86TargetMachine.cpp

@@ -116,11 +116,8 @@ X86TargetMachine::~X86TargetMachine() {}
 const X86Subtarget *
 X86TargetMachine::getSubtargetImpl(const Function &F) const {
-  AttributeSet FnAttrs = F.getAttributes();
-  Attribute CPUAttr =
-      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
-  Attribute FSAttr =
-      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+  Attribute CPUAttr = F.getFnAttribute("target-cpu");
+  Attribute FSAttr = F.getFnAttribute("target-features");
 
   std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                         ? CPUAttr.getValueAsString().str()
@@ -134,8 +131,7 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
   // function before we can generate a subtarget. We also need to use
   // it as a key for the subtarget since that can be the only difference
   // between two functions.
-  Attribute SFAttr =
-      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "use-soft-float");
+  Attribute SFAttr = F.getFnAttribute("use-soft-float");
   bool SoftFloat = !SFAttr.hasAttribute(Attribute::None)
                        ? SFAttr.getValueAsString() == "true"
                        : Options.UseSoftFloat;
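
Note: string attributes such as "target-cpu" have no enum kind, so the
canonical form is the StringRef overload used in the X86TargetMachine
hunks above. A minimal sketch (variable names are ours):

  // Old spelling: fetch from the FunctionIndex slot by string key.
  Attribute CPUAttrOld =
      F.getAttributes().getAttribute(AttributeSet::FunctionIndex, "target-cpu");

  // New spelling: the Function-level convenience wrapper.
  Attribute CPUAttrNew = F.getFnAttribute("target-cpu");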