Remove the `hasFnAttr' method from Function.
The hasFnAttr method has been replaced by querying the Attributes explicitly. No intended functionality change.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@164725 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent b7abea0840
commit 2c18906118
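For readers following the API change, here is a minimal sketch of the migration pattern the hunks below apply. It assumes the 2012-era headers llvm/Function.h and llvm/Attributes.h; the attribute name and the hasOptimizeForSizeAttr() accessor are taken from the diff itself, while isOptimizedForSize is a hypothetical helper used only for illustration, not part of the commit.

    #include "llvm/Function.h"
    #include "llvm/Attributes.h"

    using namespace llvm;

    // Old query style, removed by this commit:
    //   bool OptSize = F->hasFnAttr(Attribute::OptimizeForSize);

    // New query style: fetch the function-level Attributes object first,
    // then ask it for the specific attribute.
    static bool isOptimizedForSize(const Function *F) {
      return F->getFnAttributes().hasOptimizeForSizeAttr();
    }

The same substitution is repeated mechanically across every caller in the diff that follows.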
@@ -168,10 +168,10 @@ public:
 ///
 void setAttributes(const AttrListPtr &attrs) { AttributeList = attrs; }

-/// hasFnAttr - Return true if this function has the given attribute.
-bool hasFnAttr(Attributes N) const {
-// Function Attributes are stored at ~0 index
-return AttributeList.paramHasAttr(~0U, N);
+/// getFnAttributes - Return the function attributes for querying.
+///
+Attributes getFnAttributes() const {
+return AttributeList.getFnAttributes();
 }

 /// addFnAttr - Add function attributes to this function.
@@ -195,6 +195,11 @@ public:
 void setGC(const char *Str);
 void clearGC();

+/// getParamAttributes - Return the parameter attributes for querying.
+Attributes getParamAttributes(unsigned Idx) const {
+return AttributeList.getParamAttributes(Idx);
+}
+
 /// @brief Determine whether the function has the given attribute.
 bool paramHasAttr(unsigned i, Attributes attr) const {
 return AttributeList.paramHasAttr(i, attr);
@@ -213,7 +218,7 @@ public:

 /// @brief Determine if the function does not access memory.
 bool doesNotAccessMemory() const {
-return hasFnAttr(Attribute::ReadNone);
+return getFnAttributes().hasReadNoneAttr();
 }
 void setDoesNotAccessMemory(bool DoesNotAccessMemory = true) {
 if (DoesNotAccessMemory) addFnAttr(Attribute::ReadNone);
@@ -222,7 +227,7 @@ public:

 /// @brief Determine if the function does not access or only reads memory.
 bool onlyReadsMemory() const {
-return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly);
+return doesNotAccessMemory() || getFnAttributes().hasReadOnlyAttr();
 }
 void setOnlyReadsMemory(bool OnlyReadsMemory = true) {
 if (OnlyReadsMemory) addFnAttr(Attribute::ReadOnly);
@@ -231,7 +236,7 @@ public:

 /// @brief Determine if the function cannot return.
 bool doesNotReturn() const {
-return hasFnAttr(Attribute::NoReturn);
+return getFnAttributes().hasNoReturnAttr();
 }
 void setDoesNotReturn(bool DoesNotReturn = true) {
 if (DoesNotReturn) addFnAttr(Attribute::NoReturn);
@@ -240,7 +245,7 @@ public:

 /// @brief Determine if the function cannot unwind.
 bool doesNotThrow() const {
-return hasFnAttr(Attribute::NoUnwind);
+return getFnAttributes().hasNoUnwindAttr();
 }
 void setDoesNotThrow(bool DoesNotThrow = true) {
 if (DoesNotThrow) addFnAttr(Attribute::NoUnwind);
@@ -250,7 +255,7 @@ public:
 /// @brief True if the ABI mandates (or the user requested) that this
 /// function be in a unwind table.
 bool hasUWTable() const {
-return hasFnAttr(Attribute::UWTable);
+return getFnAttributes().hasUWTableAttr();
 }
 void setHasUWTable(bool HasUWTable = true) {
 if (HasUWTable)
@@ -196,7 +196,7 @@ void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
 // as volatile if they are live across a setjmp call, and they probably
 // won't do this in callers.
 exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
-!F->hasFnAttr(Attribute::ReturnsTwice);
+!F->getFnAttributes().hasReturnsTwiceAttr();

 // Look at the size of the callee.
 for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
@@ -128,7 +128,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
 public:
 CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold)
 : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
-AlwaysInline(F.hasFnAttr(Attribute::AlwaysInline)),
+AlwaysInline(F.getFnAttributes().hasAlwaysInlineAttr()),
 IsCallerRecursive(false), IsRecursiveCall(false),
 ExposesReturnsTwice(false), HasDynamicAlloca(false), AllocatedSize(0),
 NumInstructions(0), NumVectorInstructions(0),
@@ -613,7 +613,7 @@ bool CallAnalyzer::visitStore(StoreInst &I) {

 bool CallAnalyzer::visitCallSite(CallSite CS) {
 if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
-!F.hasFnAttr(Attribute::ReturnsTwice)) {
+!F.getFnAttributes().hasReturnsTwiceAttr()) {
 // This aborts the entire analysis.
 ExposesReturnsTwice = true;
 return false;
@@ -1043,7 +1043,7 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
 // something else. Don't inline functions marked noinline or call sites
 // marked noinline.
 if (!Callee || Callee->mayBeOverridden() ||
-Callee->hasFnAttr(Attribute::NoInline) || CS.isNoInline())
+Callee->getFnAttributes().hasNoInlineAttr() || CS.isNoInline())
 return llvm::InlineCost::getNever();

 DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
@@ -327,7 +327,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
 return 0;

 if (LIOffs+NewLoadByteSize > MemLocEnd &&
-LI->getParent()->getParent()->hasFnAttr(Attribute::AddressSafety)) {
+LI->getParent()->getParent()->getFnAttributes().hasAddressSafetyAttr()){
 // We will be reading past the location accessed by the original program.
 // While this is safe in a regular build, Address Safety analysis tools
 // may start reporting false warnings. So, don't do widening.
@@ -574,7 +574,7 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1,
 // instructions that would be deleted in the merge.
 MachineFunction *MF = MBB1->getParent();
 if (EffectiveTailLen >= 2 &&
-MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
+MF->getFunction()->getFnAttributes().hasOptimizeForSizeAttr() &&
 (I1 == MBB1->begin() || I2 == MBB2->begin()))
 return true;

@@ -373,7 +373,7 @@ bool CodePlacementOpt::OptimizeIntraLoopEdges(MachineFunction &MF) {
 ///
 bool CodePlacementOpt::AlignLoops(MachineFunction &MF) {
 const Function *F = MF.getFunction();
-if (F->hasFnAttr(Attribute::OptimizeForSize))
+if (F->getFnAttributes().hasOptimizeForSizeAttr())
 return false;

 unsigned Align = TLI->getPrefLoopAlignment();
@@ -1013,7 +1013,7 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
 // exclusively on the loop info here so that we can align backedges in
 // unnatural CFGs and backedges that were introduced purely because of the
 // loop rotations done during this layout pass.
-if (F.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+if (F.getFunction()->getFnAttributes().hasOptimizeForSizeAttr())
 return;
 unsigned Align = TLI->getPrefLoopAlignment();
 if (!Align)
@@ -59,13 +59,13 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
 RegInfo = 0;
 MFInfo = 0;
 FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering());
-if (Fn->hasFnAttr(Attribute::StackAlignment))
+if (Fn->getFnAttributes().hasStackAlignmentAttr())
 FrameInfo->ensureMaxAlignment(Fn->getAttributes().
 getFnAttributes().getStackAlignment());
 ConstantPool = new (Allocator) MachineConstantPool(TM.getTargetData());
 Alignment = TM.getTargetLowering()->getMinFunctionAlignment();
 // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
-if (!Fn->hasFnAttr(Attribute::OptimizeForSize))
+if (!Fn->getFnAttributes().hasOptimizeForSizeAttr())
 Alignment = std::max(Alignment,
 TM.getTargetLowering()->getPrefFunctionAlignment());
 FunctionNumber = FunctionNum;
@@ -96,7 +96,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
 placeCSRSpillsAndRestores(Fn);

 // Add the code to save and restore the callee saved registers
-if (!F->hasFnAttr(Attribute::Naked))
+if (!F->getFnAttributes().hasNakedAttr())
 insertCSRSpillsAndRestores(Fn);

 // Allow the target machine to make final modifications to the function
@@ -111,7 +111,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) {
 // called functions. Because of this, calculateCalleeSavedRegisters()
 // must be called before this function in order to set the AdjustsStack
 // and MaxCallFrameSize variables.
-if (!F->hasFnAttr(Attribute::Naked))
+if (!F->getFnAttributes().hasNakedAttr())
 insertPrologEpilogCode(Fn);

 // Replace all MO_FrameIndex operands with physical register references
@@ -221,7 +221,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) {
 return;

 // In Naked functions we aren't going to save any registers.
-if (Fn.getFunction()->hasFnAttr(Attribute::Naked))
+if (Fn.getFunction()->getFnAttributes().hasNakedAttr())
 return;

 std::vector<CalleeSavedInfo> CSI;
@@ -3521,7 +3521,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
 bool DstAlignCanChange = false;
 MachineFunction &MF = DAG.getMachineFunction();
 MachineFrameInfo *MFI = MF.getFrameInfo();
-bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+bool OptSize = MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr();
 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
 DstAlignCanChange = true;
@@ -3614,7 +3614,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
 bool DstAlignCanChange = false;
 MachineFunction &MF = DAG.getMachineFunction();
 MachineFrameInfo *MFI = MF.getFrameInfo();
-bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+bool OptSize = MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr();
 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
 DstAlignCanChange = true;
@@ -3692,7 +3692,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
 bool DstAlignCanChange = false;
 MachineFunction &MF = DAG.getMachineFunction();
 MachineFrameInfo *MFI = MF.getFrameInfo();
-bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+bool OptSize = MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr();
 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
 DstAlignCanChange = true;
@@ -4400,7 +4400,7 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS,
 return DAG.getConstantFP(1.0, LHS.getValueType());

 const Function *F = DAG.getMachineFunction().getFunction();
-if (!F->hasFnAttr(Attribute::OptimizeForSize) ||
+if (!F->getFnAttributes().hasOptimizeForSizeAttr() ||
 // If optimizing for size, don't insert too many multiplies. This
 // inserts up to 5 multiplies.
 CountPopulation_32(Val)+Log2_32(Val) < 7) {
@@ -137,10 +137,10 @@ bool StackProtector::ContainsProtectableArray(Type *Ty, bool InStruct) const {
 /// add a guard variable to functions that call alloca, and functions with
 /// buffers larger than SSPBufferSize bytes.
 bool StackProtector::RequiresStackProtector() const {
-if (F->hasFnAttr(Attribute::StackProtectReq))
+if (F->getFnAttributes().hasStackProtectReqAttr())
 return true;

-if (!F->hasFnAttr(Attribute::StackProtect))
+if (!F->getFnAttributes().hasStackProtectAttr())
 return false;

 for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
@@ -552,7 +552,7 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF,
 // compensate for the duplication.
 unsigned MaxDuplicateCount;
 if (TailDuplicateSize.getNumOccurrences() == 0 &&
-MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr())
 MaxDuplicateCount = 1;
 else
 MaxDuplicateCount = TailDuplicateSize;
@@ -2996,7 +2996,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
 // instructions).
 if (Latency > 0 && Subtarget.isThumb2()) {
 const MachineFunction *MF = DefMI->getParent()->getParent();
-if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
+if (MF->getFunction()->getFnAttributes().hasOptimizeForSizeAttr())
 --Latency;
 }
 return Latency;
@@ -562,7 +562,7 @@ needsStackRealignment(const MachineFunction &MF) const {
 const Function *F = MF.getFunction();
 unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
 bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
-F->hasFnAttr(Attribute::StackAlignment));
+F->getFnAttributes().hasStackAlignmentAttr());

 return requiresRealignment && canRealignStack(MF);
 }
@@ -1151,7 +1151,7 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) {
 return;

 // Naked functions don't spill callee-saved registers.
-if (MF.getFunction()->hasFnAttr(Attribute::Naked))
+if (MF.getFunction()->getFnAttributes().hasNakedAttr())
 return;

 // We are planning to use NEON instructions vst1 / vld1.
@@ -6326,7 +6326,7 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const {
 UnitSize = 2;
 } else {
 // Check whether we can use NEON instructions.
-if (!MF->getFunction()->hasFnAttr(Attribute::NoImplicitFloat) &&
+if (!MF->getFunction()->getFnAttributes().hasNoImplicitFloatAttr() &&
 Subtarget->hasNEON()) {
 if ((Align % 16 == 0) && SizeVal >= 16) {
 ldrOpc = ARM::VLD1q32wb_fixed;
@@ -9060,7 +9060,7 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,

 // See if we can use NEON instructions for this...
 if (IsZeroVal &&
-!F->hasFnAttr(Attribute::NoImplicitFloat) &&
+!F->getFnAttributes().hasNoImplicitFloatAttr() &&
 Subtarget->hasNEON()) {
 if (memOpAlign(SrcAlign, DstAlign, 16) && Size >= 16) {
 return MVT::v4i32;
@@ -193,7 +193,7 @@ void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const {
 // to adjust the stack pointer (we fit in the Red Zone). For 64-bit
 // SVR4, we also require a stack frame if we need to spill the CR,
 // since this spill area is addressed relative to the stack pointer.
-bool DisableRedZone = MF.getFunction()->hasFnAttr(Attribute::NoRedZone);
+bool DisableRedZone = MF.getFunction()->getFnAttributes().hasNoRedZoneAttr();
 // FIXME SVR4 The 32-bit SVR4 ABI has no red zone. However, it can
 // still generate stackless code if all local vars are reg-allocated.
 // Try: (FrameSize <= 224
@@ -255,7 +255,7 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {

 // Naked functions have no stack frame pushed, so we don't have a frame
 // pointer.
-if (MF.getFunction()->hasFnAttr(Attribute::Naked))
+if (MF.getFunction()->getFnAttributes().hasNakedAttr())
 return false;

 return MF.getTarget().Options.DisableFramePointerElim(MF) ||
@@ -6002,7 +6002,7 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
 bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) ||
 MFI->hasVarSizedObjects()) &&
 MFI->getStackSize() &&
-!MF.getFunction()->hasFnAttr(Attribute::Naked);
+!MF.getFunction()->getFnAttributes().hasNakedAttr();
 unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) :
 (is31 ? PPC::R31 : PPC::R1);
 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
@@ -596,7 +596,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
 // to Offset to get the correct offset.
 // Naked functions have stack size 0, although getStackSize may not reflect that
 // because we didn't call all the pieces that compute it for naked functions.
-if (!MF.getFunction()->hasFnAttr(Attribute::Naked))
+if (!MF.getFunction()->getFnAttributes().hasNakedAttr())
 Offset += MFI->getStackSize();

 // If we can, encode the offset directly into the instruction. If this is a
@@ -674,7 +674,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
 // function, and use up to 128 bytes of stack space, don't have a frame
 // pointer, calls, or dynamic alloca then we do not need to adjust the
 // stack pointer (we fit in the Red Zone).
-if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
+if (Is64Bit && !Fn->getFnAttributes().hasNoRedZoneAttr() &&
 !RegInfo->needsStackRealignment(MF) &&
 !MFI->hasVarSizedObjects() && // No dynamic alloca.
 !MFI->adjustsStack() && // No calls.
@@ -428,7 +428,7 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {

 void X86DAGToDAGISel::PreprocessISelDAG() {
 // OptForSize is used in pattern predicates that isel is matching.
-OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+OptForSize = MF->getFunction()->getFnAttributes().hasOptimizeForSizeAttr();

 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
 E = CurDAG->allnodes_end(); I != E; ) {
@@ -1342,7 +1342,7 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size,
 // cases like PR2962. This should be removed when PR2962 is fixed.
 const Function *F = MF.getFunction();
 if (IsZeroVal &&
-!F->hasFnAttr(Attribute::NoImplicitFloat)) {
+!F->getFnAttributes().hasNoImplicitFloatAttr()) {
 if (Size >= 16 &&
 (Subtarget->isUnalignedMemAccessFast() ||
 ((DstAlign == 0 || DstAlign >= 16) &&
@@ -2010,7 +2010,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
 unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs,
 TotalNumIntRegs);

-bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat);
+bool NoImplicitFloatOps = Fn->getFnAttributes().hasNoImplicitFloatAttr();
 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
 "SSE register cannot be used when SSE is disabled!");
 assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat &&
@@ -2486,7 +2486,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
 OpFlags = X86II::MO_DARWIN_STUB;
 } else if (Subtarget->isPICStyleRIPRel() &&
 isa<Function>(GV) &&
-cast<Function>(GV)->hasFnAttr(Attribute::NonLazyBind)) {
+cast<Function>(GV)->getFnAttributes().hasNonLazyBindAttr()) {
 // If the function is marked as non-lazy, generate an indirect call
 // which loads from the GOT directly. This avoids runtime overhead
 // at the cost of eager binding (and one extra byte of encoding).
@@ -6629,7 +6629,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
 bool HasAVX = Subtarget->hasAVX();
 bool HasAVX2 = Subtarget->hasAVX2();
 MachineFunction &MF = DAG.getMachineFunction();
-bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize);
+bool OptForSize = MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr();

 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

@@ -9669,7 +9669,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
 // Sanity Check: Make sure using fp_offset makes sense.
 assert(!getTargetMachine().Options.UseSoftFloat &&
 !(DAG.getMachineFunction()
-.getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) &&
+.getFunction()->getFnAttributes().hasNoImplicitFloatAttr()) &&
 Subtarget->hasSSE1());
 }

@@ -15438,7 +15438,7 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
 return SDValue();

 const Function *F = DAG.getMachineFunction().getFunction();
-bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat);
+bool NoImplicitFloatOps = F->getFnAttributes().hasNoImplicitFloatAttr();
 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
 && Subtarget->hasSSE2();
 if ((VT.isVector() ||
@@ -3820,7 +3820,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,

 // Unless optimizing for size, don't fold to avoid partial
 // register update stalls
-if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
+if (!MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr() &&
 hasPartialRegUpdate(MI->getOpcode()))
 return 0;

@@ -3861,7 +3861,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,

 // Unless optimizing for size, don't fold to avoid partial
 // register update stalls
-if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) &&
+if (!MF.getFunction()->getFnAttributes().hasOptimizeForSizeAttr() &&
 hasPartialRegUpdate(MI->getOpcode()))
 return 0;

@@ -399,7 +399,7 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
 const Function *F = MF.getFunction();
 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
 bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
-F->hasFnAttr(Attribute::StackAlignment));
+F->getFnAttributes().hasStackAlignmentAttr());

 // If we've requested that we force align the stack do so now.
 if (ForceStackAlign)
@@ -65,7 +65,7 @@ Pass *llvm::createAlwaysInlinerPass(bool InsertLifetime) {

 /// \brief Minimal filter to detect invalid constructs for inlining.
 static bool isInlineViable(Function &F) {
-bool ReturnsTwice = F.hasFnAttr(Attribute::ReturnsTwice);
+bool ReturnsTwice = F.getFnAttributes().hasReturnsTwiceAttr();
 for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
 // Disallow inlining of functions which contain an indirect branch.
 if (isa<IndirectBrInst>(BI->getTerminator()))
@@ -114,7 +114,7 @@ InlineCost AlwaysInliner::getInlineCost(CallSite CS) {
 if (Callee->isDeclaration()) return InlineCost::getNever();

 // Return never for anything not marked as always inline.
-if (!Callee->hasFnAttr(Attribute::AlwaysInline))
+if (!Callee->getFnAttributes().hasAlwaysInlineAttr())
 return InlineCost::getNever();

 // Do some minimal analysis to preclude non-viable functions.
@@ -93,10 +93,10 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,

 // If the inlined function had a higher stack protection level than the
 // calling function, then bump up the caller's stack protection level.
-if (Callee->hasFnAttr(Attribute::StackProtectReq))
+if (Callee->getFnAttributes().hasStackProtectReqAttr())
 Caller->addFnAttr(Attribute::StackProtectReq);
-else if (Callee->hasFnAttr(Attribute::StackProtect) &&
-!Caller->hasFnAttr(Attribute::StackProtectReq))
+else if (Callee->getFnAttributes().hasStackProtectAttr() &&
+!Caller->getFnAttributes().hasStackProtectReqAttr())
 Caller->addFnAttr(Attribute::StackProtect);

 // Look at all of the allocas that we inlined through this call site. If we
@@ -209,7 +209,7 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
 // would decrease the threshold.
 Function *Caller = CS.getCaller();
 bool OptSize = Caller && !Caller->isDeclaration() &&
-Caller->hasFnAttr(Attribute::OptimizeForSize);
+Caller->getFnAttributes().hasOptimizeForSizeAttr();
 if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
 OptSizeThreshold < thres)
 thres = OptSizeThreshold;
@@ -217,7 +217,7 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const {
 // Listen to the inlinehint attribute when it would increase the threshold.
 Function *Callee = CS.getCalledFunction();
 bool InlineHint = Callee && !Callee->isDeclaration() &&
-Callee->hasFnAttr(Attribute::InlineHint);
+Callee->getFnAttributes().hasInlineHintAttr();
 if (InlineHint && HintThreshold > thres)
 thres = HintThreshold;

@@ -533,7 +533,7 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
 // Handle the case when this function is called and we only want to care
 // about always-inline functions. This is a bit of a hack to share code
 // between here and the InlineAlways pass.
-if (AlwaysInlineOnly && !F->hasFnAttr(Attribute::AlwaysInline))
+if (AlwaysInlineOnly && !F->getFnAttributes().hasAlwaysInlineAttr())
 continue;

 // If the only remaining users of the function are dead constants, remove
@@ -854,7 +854,7 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) {
 // If needed, insert __asan_init before checking for AddressSafety attr.
 maybeInsertAsanInitAtFunctionEntry(F);

-if (!F.hasFnAttr(Attribute::AddressSafety)) return false;
+if (!F.getFnAttributes().hasAddressSafetyAttr()) return false;

 if (!ClDebugFunc.empty() && ClDebugFunc != F.getName())
 return false;
@@ -149,7 +149,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
 TLInfo = &getAnalysis<TargetLibraryInfo>();
 DT = getAnalysisIfAvailable<DominatorTree>();
 PFI = getAnalysisIfAvailable<ProfileInfo>();
-OptSize = F.hasFnAttr(Attribute::OptimizeForSize);
+OptSize = F.getFnAttributes().hasOptimizeForSizeAttr();

 /// This optimization identifies DIV instructions that can be
 /// profitably bypassed and carried out with a shorter, faster divide.
@@ -145,7 +145,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) {
 // not user specified.
 unsigned Threshold = CurrentThreshold;
 if (!UserThreshold &&
-Header->getParent()->hasFnAttr(Attribute::OptimizeForSize))
+Header->getParent()->getFnAttributes().hasOptimizeForSizeAttr())
 Threshold = OptSizeUnrollThreshold;

 // Find trip count and trip multiple if count is not available
@@ -638,7 +638,7 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) {
 // Check to see if it would be profitable to unswitch current loop.

 // Do not do non-trivial unswitch while optimizing for size.
-if (OptimizeForSize || F->hasFnAttr(Attribute::OptimizeForSize))
+if (OptimizeForSize || F->getFnAttributes().hasOptimizeForSizeAttr())
 return false;

 UnswitchNontrivialCondition(LoopCond, Val, currentLoop);