[stack protector] Fix a potential security bug in stack protector where the
address of the stack guard was being spilled to the stack.

Previously, the address of the stack guard would get spilled to the stack if it
was impossible to keep it in a register. This patch introduces a new
target-independent node and pseudo instruction which are expanded post-RA into
a sequence of instructions that loads the stack guard value. The register
allocator can now simply rematerialize the value when it cannot keep it in a
register.
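
For illustration, here is a condensed sketch of the two hooks a target
implements to opt into this mechanism. The "MyTarget" names are hypothetical
and the bodies are abbreviated; the actual AArch64, ARM, and X86
implementations appear in the hunks below.

// Hypothetical target, condensed for illustration only.
// 1) Ask SelectionDAGBuilder to emit LOAD_STACK_GUARD instead of a volatile
//    load when it lowers Intrinsic::stackprotector.
bool MyTargetLowering::useLoadStackGuardNode() const {
  return true;
}

// 2) Expand the rematerializable pseudo after register allocation into the
//    target's guard-load sequence, so the register allocator can re-emit it
//    instead of spilling the guard value to the stack.
bool MyTargetInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;
  // BuildMI(...) the target-specific load of the stack guard into
  // MI->getOperand(0).getReg() here (see the per-target hunks below).
  MI->getParent()->erase(MI);
  return true;
}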

<rdar://problem/12475629>


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@213967 91177308-0d34-0410-b5e6-96231b3b80d8
Akira Hatanaka 2014-07-25 19:31:34 +00:00
parent 24dfa53b8a
commit 0651a556fe
26 changed files with 377 additions and 9 deletions


@ -841,6 +841,14 @@ def PATCHPOINT : Instruction {
let mayLoad = 1;
let usesCustomInserter = 1;
}
def LOAD_STACK_GUARD : Instruction {
let OutOperandList = (outs ptr_rc:$dst);
let InOperandList = (ins);
let mayLoad = 1;
bit isReMaterializable = 1;
let hasSideEffects = 0;
bit isPseudo = 1;
}
}
//===----------------------------------------------------------------------===//


@ -2598,6 +2598,12 @@ public:
/// ARM 's' setting instructions.
virtual void
AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
/// If this function returns true, SelectionDAGBuilder emits a
/// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
virtual bool useLoadStackGuardNode() const {
return false;
}
};
/// Given an LLVM IR type and return type attributes, compute the return value


@ -104,7 +104,13 @@ enum {
/// support optimizations for dynamic languages (such as javascript) that
/// rewrite calls to runtimes with more efficient code sequences.
/// This also implies a stack map.
PATCHPOINT = 18,
/// This pseudo-instruction loads the stack guard value. Targets which need
/// to prevent the stack guard value or address from being spilled to the
/// stack should override TargetLowering::useLoadStackGuardNode and
/// additionally expand this pseudo after register allocation.
LOAD_STACK_GUARD = 19
};
} // end namespace TargetOpcode
} // end namespace llvm


@ -1811,9 +1811,19 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
unsigned Align =
TLI->getDataLayout()->getPrefTypeAlignment(IRGuard->getType());
SDValue Guard;
// If useLoadStackGuardNode returns true, retrieve the guard value from
// the virtual register holding the value. Otherwise, emit a volatile load
// to retrieve the stack guard value.
if (TLI->useLoadStackGuardNode())
Guard = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
SPD.getGuardReg(), PtrTy);
else
Guard = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
GuardPtr, MachinePointerInfo(IRGuard, 0),
true, false, false, Align);
SDValue StackSlot = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
StackSlotPtr,
@ -5228,8 +5238,35 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
EVT PtrTy = TLI->getPointerTy();
SDValue Src, Chain = getRoot();
if (TLI->useLoadStackGuardNode()) {
// Emit a LOAD_STACK_GUARD node.
MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD,
sdl, PtrTy, Chain);
LoadInst *LI = cast<LoadInst>(I.getArgOperand(0));
MachinePointerInfo MPInfo(LI->getPointerOperand());
MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
unsigned Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MOInvariant;
*MemRefs = MF.getMachineMemOperand(MPInfo, Flags,
PtrTy.getSizeInBits() / 8,
DAG.getEVTAlignment(PtrTy));
Node->setMemRefs(MemRefs, MemRefs + 1);
// Copy the guard value to a virtual register so that it can be
// retrieved in the epilogue.
Src = SDValue(Node, 0);
const TargetRegisterClass *RC =
TLI->getRegClassFor(Src.getSimpleValueType());
unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
SPDescriptor.setGuardReg(Reg);
Chain = DAG.getCopyToReg(Chain, sdl, Reg, Src);
} else {
Src = getValue(I.getArgOperand(0)); // The guard's value.
}
AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
int FI = FuncInfo.StaticAllocaMap[Slot];
@ -5238,7 +5275,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
// Store the stack protector onto the stack.
Res = DAG.getStore(Chain, sdl, Src, FIN,
MachinePointerInfo::getFixedStack(FI),
true, false, 0);
setValue(&I, Res);


@ -397,7 +397,8 @@ private:
class StackProtectorDescriptor {
public:
StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr),
FailureMBB(nullptr), Guard(nullptr),
GuardReg(0) { }
~StackProtectorDescriptor() { }
/// Returns true if all fields of the stack protector descriptor are
@ -455,6 +456,9 @@ private:
MachineBasicBlock *getFailureMBB() { return FailureMBB; }
const Value *getGuard() { return Guard; }
unsigned getGuardReg() const { return GuardReg; }
void setGuardReg(unsigned R) { GuardReg = R; }
private:
/// The basic block for which we are generating the stack protector.
///
@ -477,6 +481,9 @@ private:
/// stack protector stack slot.
const Value *Guard;
/// The virtual register holding the stack guard value.
unsigned GuardReg;
/// Add a successor machine basic block to ParentMBB. If the successor mbb
/// has not been created yet (i.e. if SuccMBB = 0), then the machine basic
/// block will be created.


@ -8095,6 +8095,10 @@ bool AArch64TargetLowering::shouldExpandAtomicInIR(Instruction *Inst) const {
return Inst->getType()->getPrimitiveSizeInBits() <= 128;
}
bool AArch64TargetLowering::useLoadStackGuardNode() const {
return true;
}
TargetLoweringBase::LegalizeTypeAction
AArch64TargetLowering::getPreferredVectorAction(EVT VT) const {
MVT SVT = VT.getSimpleVT();


@ -324,6 +324,7 @@ public:
bool shouldExpandAtomicInIR(Instruction *Inst) const override;
bool useLoadStackGuardNode() const override;
TargetLoweringBase::LegalizeTypeAction
getPreferredVectorAction(EVT VT) const override;


@ -848,6 +848,56 @@ bool AArch64InstrInfo::optimizeCompareInstr(
return true;
}
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
return false;
MachineBasicBlock &MBB = *MI->getParent();
DebugLoc DL = MI->getDebugLoc();
unsigned Reg = MI->getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MI->memoperands_begin())->getValue());
const TargetMachine &TM = MBB.getParent()->getTarget();
unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
const unsigned char MO_NC = AArch64II::MO_NC;
if ((OpFlags & AArch64II::MO_GOT) != 0) {
BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
.addGlobalAddress(GV, 0, AArch64II::MO_GOT);
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill).addImm(0)
.addMemOperand(*MI->memoperands_begin());
} else if (TM.getCodeModel() == CodeModel::Large) {
BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
.addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill).addImm(0)
.addMemOperand(*MI->memoperands_begin());
} else {
BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
.addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
.addReg(Reg, RegState::Kill)
.addGlobalAddress(GV, 0, LoFlags)
.addMemOperand(*MI->memoperands_begin());
}
MBB.erase(MI);
return true;
}
/// Return true if this instruction has a non-zero immediate
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
switch (MI->getOpcode()) {


@ -154,6 +154,8 @@ public:
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
private:
void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
MachineBasicBlock *TBB,


@ -1174,7 +1174,20 @@ unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}
bool
ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
MachineFunction &MF = *MI->getParent()->getParent();
Reloc::Model RM = MF.getTarget().getRelocationModel();
if (MI->getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
assert(getSubtarget().getTargetTriple().getObjectFormat() ==
Triple::MachO &&
"LOAD_STACK_GUARD currently supported only for MachO.");
expandLoadStackGuard(MI, RM);
MI->getParent()->erase(MI);
return true;
}
// This hook gets to expand COPY instructions before they become
// copyPhysReg() calls. Look for VMOVS instructions that can legally be
// widened to VMOVD. We prefer the VMOVD when possible because it may be
@ -3933,6 +3946,38 @@ bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
return true;
}
// LoadStackGuard has so far only been implemented for MachO. A different
// code sequence is needed for other targets.
void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
unsigned LoadImmOpc,
unsigned LoadOpc,
Reloc::Model RM) const {
MachineBasicBlock &MBB = *MI->getParent();
DebugLoc DL = MI->getDebugLoc();
unsigned Reg = MI->getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MI->memoperands_begin())->getValue());
MachineInstrBuilder MIB;
BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
.addGlobalAddress(GV, 0, ARMII::MO_NONLAZY);
if (Subtarget.GVIsIndirectSymbol(GV, RM)) {
MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
MIB.addReg(Reg, RegState::Kill).addImm(0);
unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
MachineMemOperand *MMO = MBB.getParent()->
getMachineMemOperand(MachinePointerInfo::getGOT(), Flag, 4, 4);
MIB.addMemOperand(MMO);
AddDefaultPred(MIB);
}
MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
MIB.addReg(Reg, RegState::Kill).addImm(0);
MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
AddDefaultPred(MIB);
}
bool
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
unsigned &AddSubOpc,


@ -18,6 +18,7 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetInstrInfo.h"
#define GET_INSTRINFO_HEADER
@ -34,6 +35,10 @@ protected:
// Can be only subclassed.
explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
unsigned LoadImmOpc, unsigned LoadOpc,
Reloc::Model RM) const;
public:
// Return whether the target has an explicit NOP encoding.
bool hasNOP() const;
@ -286,6 +291,9 @@ private:
bool verifyInstruction(const MachineInstr *MI,
StringRef &ErrInfo) const override;
virtual void expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const = 0;
private:
/// Modeling special VFP / NEON fp MLA / MLS hazards.


@ -10804,6 +10804,11 @@ bool ARMTargetLowering::shouldExpandAtomicInIR(Instruction *Inst) const {
return Inst->getType()->getPrimitiveSizeInBits() <= AtomicLimit;
}
// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
return Subtarget->getTargetTriple().getObjectFormat() == Triple::MachO;
}
Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
AtomicOrdering Ord) const {
Module *M = Builder.GetInsertBlock()->getParent()->getParent();


@ -398,6 +398,8 @@ namespace llvm {
bool shouldExpandAtomicInIR(Instruction *Inst) const override;
bool useLoadStackGuardNode() const override;
protected:
std::pair<const TargetRegisterClass*, uint8_t>
findRepresentativeClass(MVT VT) const override;


@ -90,6 +90,14 @@ unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const {
return 0;
}
void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const {
if (RM == Reloc::Static)
expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_abs, ARM::LDRi12, RM);
else
expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_pcrel, ARM::LDRi12, RM);
}
namespace {
/// ARMCGBR - Create Global Base Reg pass. This initializes the PIC
/// global base register for ARM ELF.


@ -37,6 +37,10 @@ public:
/// always be able to get register info as well (through this method).
///
const ARMRegisterInfo &getRegisterInfo() const override { return RI; }
private:
void expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const override;
};
}


@ -101,3 +101,12 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
}
}
void
Thumb1InstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const {
if (RM == Reloc::Static)
expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_abs, ARM::tLDRi, RM);
else
expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_pcrel, ARM::tLDRi, RM);
}


@ -54,6 +54,9 @@ public:
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
private:
void expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const override;
};
}


@ -209,6 +209,15 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}
void
Thumb2InstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const {
if (RM == Reloc::Static)
expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12, RM);
else
expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12, RM);
}
void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
unsigned DestReg, unsigned BaseReg, int NumBytes,


@ -61,6 +61,10 @@ public:
/// always be able to get register info as well (through this method).
///
const Thumb2RegisterInfo &getRegisterInfo() const override { return RI; }
private:
void expandLoadStackGuard(MachineBasicBlock::iterator MI,
Reloc::Model RM) const override;
};
/// getITInstrPredicate - Valid only in Thumb2 mode. This function is identical


@ -1645,6 +1645,12 @@ void X86TargetLowering::resetOperationActions() {
setPrefFunctionAlignment(4); // 2^4 bytes.
}
// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
return Subtarget->getTargetTriple().getObjectFormat() == Triple::MachO &&
Subtarget->is64Bit();
}
TargetLoweringBase::LegalizeTypeAction
X86TargetLowering::getPreferredVectorAction(EVT VT) const {
if (ExperimentalVectorWideningLegalization &&


@ -796,6 +796,7 @@ namespace llvm {
/// \brief Reset the operation actions based on target options.
void resetOperationActions() override;
bool useLoadStackGuardNode() const override;
/// \brief Customize the preferred legalization strategy for certain types.
LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;


@ -3963,6 +3963,28 @@ static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
return true;
}
// LoadStackGuard has so far only been implemented for 64-bit MachO. A
// different code sequence is needed for other targets.
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
const TargetInstrInfo &TII) {
MachineBasicBlock &MBB = *MIB->getParent();
DebugLoc DL = MIB->getDebugLoc();
unsigned Reg = MIB->getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
MachineMemOperand *MMO = MBB.getParent()->
getMachineMemOperand(MachinePointerInfo::getGOT(), Flag, 8, 8);
MachineBasicBlock::iterator I = MIB;
BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
.addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
.addMemOperand(MMO);
MIB->setDebugLoc(DL);
MIB->setDesc(TII.get(X86::MOV64rm));
MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}
bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
bool HasAVX = Subtarget.hasAVX();
MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
@ -3997,6 +4019,9 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
case X86::KSET0W: return Expand2AddrUndef(MIB, get(X86::KXORWrr));
case X86::KSET1B:
case X86::KSET1W: return Expand2AddrUndef(MIB, get(X86::KXNORWrr));
case TargetOpcode::LOAD_STACK_GUARD:
expandLoadStackGuard(MIB, *this);
return true;
}
return false;
}


@ -0,0 +1,48 @@
; RUN: llc < %s -mtriple=arm64-apple-ios -relocation-model=pic -no-integrated-as | FileCheck %s -check-prefix=DARWIN
; RUN: llc < %s -mtriple=arm64-apple-ios -relocation-model=static -no-integrated-as | FileCheck %s -check-prefix=DARWIN
; RUN: llc < %s -mtriple=aarch64-linux-gnu -relocation-model=pic -no-integrated-as | FileCheck %s -check-prefix=PIC-LINUX
; RUN: llc < %s -mtriple=aarch64-linux-gnu -relocation-model=static -code-model=large -no-integrated-as | FileCheck %s -check-prefix=STATIC-LARGE
; RUN: llc < %s -mtriple=aarch64-linux-gnu -relocation-model=static -code-model=small -no-integrated-as | FileCheck %s -check-prefix=STATIC-SMALL
; DARWIN: foo2
; DARWIN: adrp [[R0:x[0-9]+]], ___stack_chk_guard@GOTPAGE
; DARWIN: ldr [[R1:x[0-9]+]], {{\[}}[[R0]], ___stack_chk_guard@GOTPAGEOFF{{\]}}
; DARWIN: ldr {{x[0-9]+}}, {{\[}}[[R1]]{{\]}}
; PIC-LINUX: foo2
; PIC-LINUX: adrp [[R0:x[0-9]+]], :got:__stack_chk_guard
; PIC-LINUX: ldr [[R1:x[0-9]+]], {{\[}}[[R0]], :got_lo12:__stack_chk_guard{{\]}}
; PIC-LINUX: ldr {{x[0-9]+}}, {{\[}}[[R1]]{{\]}}
; STATIC-LARGE: foo2
; STATIC-LARGE: movz [[R0:x[0-9]+]], #:abs_g3:__stack_chk_guard
; STATIC-LARGE: movk [[R0]], #:abs_g2_nc:__stack_chk_guard
; STATIC-LARGE: movk [[R0]], #:abs_g1_nc:__stack_chk_guard
; STATIC-LARGE: movk [[R0]], #:abs_g0_nc:__stack_chk_guard
; STATIC-LARGE: ldr {{x[0-9]+}}, {{\[}}[[R0]]{{\]}}
; STATIC-SMALL: foo2
; STATIC-SMALL: adrp [[R0:x[0-9]+]], __stack_chk_guard
; STATIC-SMALL: ldr {{x[0-9]+}}, {{\[}}[[R0]], :lo12:__stack_chk_guard{{\]}}
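; Note: the inline asm below clobbers w0-w30, so the stack guard cannot be
; kept in a register across it and must be rematerialized (rather than
; spilled to the stack) before the epilogue check.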
define i32 @test_stack_guard_remat() #0 {
entry:
%a1 = alloca [256 x i32], align 4
%0 = bitcast [256 x i32]* %a1 to i8*
call void @llvm.lifetime.start(i64 1024, i8* %0)
%arraydecay = getelementptr inbounds [256 x i32]* %a1, i64 0, i64 0
call void @foo3(i32* %arraydecay)
call void asm sideeffect "foo2", "~{w0},~{w1},~{w2},~{w3},~{w4},~{w5},~{w6},~{w7},~{w8},~{w9},~{w10},~{w11},~{w12},~{w13},~{w14},~{w15},~{w16},~{w17},~{w18},~{w19},~{w20},~{w21},~{w22},~{w23},~{w24},~{w25},~{w26},~{w27},~{w28},~{w29},~{w30}"()
call void @llvm.lifetime.end(i64 1024, i8* %0)
ret i32 0
}
; Function Attrs: nounwind
declare void @llvm.lifetime.start(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture)
attributes #0 = { nounwind sspstrong "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }


@ -0,0 +1,41 @@
; RUN: llc < %s -mtriple=arm-apple-ios -relocation-model=pic -no-integrated-as | FileCheck %s -check-prefix=PIC
; RUN: llc < %s -mtriple=arm-apple-ios -relocation-model=static -no-integrated-as | FileCheck %s -check-prefix=STATIC
;PIC: foo2
;PIC: ldr [[R0:r[0-9]+]], [[LABEL0:LCPI[0-9_]+]]
;PIC: [[LABEL1:LPC0_1]]:
;PIC: ldr [[R1:r[0-9]+]], [pc, [[R0]]]
;PIC: ldr [[R2:r[0-9]+]], {{\[}}[[R1]]{{\]}}
;PIC: ldr {{r[0-9]+}}, {{\[}}[[R2]]{{\]}}
;PIC: [[LABEL0]]:
;PIC-NEXT: .long L___stack_chk_guard$non_lazy_ptr-([[LABEL1]]+8)
;STATIC: foo2
;STATIC: ldr [[R0:r[0-9]+]], [[LABEL0:LCPI[0-9_]+]]
;STATIC: ldr {{r[0-9]+}}, {{\[}}[[R0]]{{\]}}
;STATIC: [[LABEL0]]:
;STATIC-NEXT: .long ___stack_chk_guard
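; Note: the inline asm below clobbers r0-r12, sp, and lr, so the stack guard
; cannot be kept in a register across it and must be rematerialized before
; the epilogue check.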
; Function Attrs: nounwind ssp
define i32 @test_stack_guard_remat() #0 {
%a1 = alloca [256 x i32], align 4
%1 = bitcast [256 x i32]* %a1 to i8*
call void @llvm.lifetime.start(i64 1024, i8* %1)
%2 = getelementptr inbounds [256 x i32]* %a1, i32 0, i32 0
call void @foo3(i32* %2) #3
call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
call void @llvm.lifetime.end(i64 1024, i8* %1)
ret i32 0
}
; Function Attrs: nounwind
declare void @llvm.lifetime.start(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture)
attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }


@ -0,0 +1,28 @@
; RUN: llc < %s -mtriple=x86_64-apple-darwin -no-integrated-as | FileCheck %s -check-prefix=CHECK
;CHECK: foo2
;CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), [[R0:%[a-z0-9]+]]
;CHECK: movq ([[R0]]), {{%[a-z0-9]+}}
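; Note: the inline asm below clobbers the callee-saved registers, so the
; stack guard cannot survive in a register across it and must be
; rematerialized rather than spilled to the stack.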
; Function Attrs: nounwind ssp uwtable
define i32 @test_stack_guard_remat() #0 {
entry:
%a1 = alloca [256 x i32], align 16
%0 = bitcast [256 x i32]* %a1 to i8*
call void @llvm.lifetime.start(i64 1024, i8* %0)
%arraydecay = getelementptr inbounds [256 x i32]* %a1, i64 0, i64 0
call void @foo3(i32* %arraydecay)
call void asm sideeffect "foo2", "~{r12},~{r13},~{r14},~{r15},~{ebx},~{esi},~{edi},~{dirflag},~{fpsr},~{flags}"()
call void @llvm.lifetime.end(i64 1024, i8* %0)
ret i32 0
}
; Function Attrs: nounwind
declare void @llvm.lifetime.start(i64, i8* nocapture)
declare void @foo3(i32*)
; Function Attrs: nounwind
declare void @llvm.lifetime.end(i64, i8* nocapture)
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }


@ -301,7 +301,8 @@ void CodeGenTarget::ComputeInstrsByEnum() const {
"GC_LABEL", "KILL", "EXTRACT_SUBREG", "INSERT_SUBREG",
"IMPLICIT_DEF", "SUBREG_TO_REG", "COPY_TO_REGCLASS", "DBG_VALUE",
"REG_SEQUENCE", "COPY", "BUNDLE", "LIFETIME_START",
"LIFETIME_END", "STACKMAP", "PATCHPOINT", nullptr};
"LIFETIME_END", "STACKMAP", "PATCHPOINT", "LOAD_STACK_GUARD",
nullptr};
const DenseMap<const Record*, CodeGenInstruction*> &Insts = getInstructions();
for (const char *const *p = FixedInstrs; *p; ++p) {
const CodeGenInstruction *Instr = GetInstByName(*p, Insts, Records);