[PowerPC] Prepare loops for pre-increment loads/stores
PowerPC supports pre-increment load/store instructions (except for Altivec/VSX vector loads/stores). Using these on embedded cores can be very important, but most loops are not naturally set up to use them. We can often change that, however, by placing loops into a non-canonical form. Generically, this means transforming loops like this:

  for (int i = 0; i < n; ++i)
    array[i] = c;

to look like this:

  T *p = array[-1];
  for (int i = 0; i < n; ++i)
    *++p = c;

The key point is that the addresses accessed are pulled into dedicated PHIs and "pre-decremented" in the loop preheader. This allows the use of pre-increment load/store instructions without loop peeling.

A target-specific, late, IR-level pass (running post-LSR), PPCLoopPreIncPrep, is introduced to perform this transformation. I've used this code out-of-tree for generating code for the PPC A2 for over a year. Somewhat to my surprise, running the test suite + externals on a P7 with this transformation enabled showed no performance regressions, and one speedup:

  External/SPEC/CINT2006/483.xalancbmk/483.xalancbmk  -2.32514% +/- 1.03736%

So I'm going to enable it on everything for now. I was surprised by this because, on the POWER cores, these pre-increment load/store instructions are cracked (and, thus, harder to schedule effectively). But seeing no regressions, and feeling that it is generally easier to split instructions apart late than it is to combine them late, this might be the better approach regardless.

In the future, we might want to integrate this functionality into LSR (but currently LSR does not create new PHI nodes, so, for that and other reasons, significant work would need to be done).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@228328 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 885b67a5c3
commit b8a6712c27
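As a rough, source-level illustration of the rewrite described above (a sketch, not part of the commit; the pass itself operates on LLVM IR, and the function and variable names here are invented):

  // Before: the store address is recomputed from the induction variable each
  // iteration, so the backend emits an address computation plus a plain store.
  void fill_before(float *array, float c, int n) {
    for (int i = 0; i < n; ++i)
      array[i] = c;
  }

  // After: the address lives in its own variable (a PHI at the IR level) that
  // is "pre-decremented" once in the preheader; each iteration then bumps the
  // pointer and stores through it, which maps onto a PPC update-form store
  // such as stfsu (store floating-point single with update).
  void fill_after(float *array, float c, int n) {
    float *p = array - 1;   // one element before the start, as in the commit text
    for (int i = 0; i < n; ++i)
      *++p = c;
  }

The update forms (lbzu, lwzu, lfsu, stwu, stfsu, and so on) write the new effective address back into the base register, so the separate pointer increment disappears from the loop body.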
lib/Target/PowerPC/CMakeLists.txt:
@@ -23,6 +23,7 @@ add_llvm_target(PowerPCCodeGen
  PPCEarlyReturn.cpp
  PPCFastISel.cpp
  PPCFrameLowering.cpp
  PPCLoopPreIncPrep.cpp
  PPCMCInstLower.cpp
  PPCMachineFunctionInfo.cpp
  PPCRegisterInfo.cpp
lib/Target/PowerPC/PPC.h:
@@ -34,6 +34,7 @@ namespace llvm {
#ifndef NDEBUG
  FunctionPass *createPPCCTRLoopsVerify();
#endif
  FunctionPass *createPPCLoopPreIncPrepPass(PPCTargetMachine &TM);
  FunctionPass *createPPCEarlyReturnPass();
  FunctionPass *createPPCVSXCopyPass();
  FunctionPass *createPPCVSXFMAMutatePass();
lib/Target/PowerPC/PPCLoopPreIncPrep.cpp (new file, 374 lines)
@@ -0,0 +1,374 @@
//===------ PPCLoopPreIncPrep.cpp - Loop Pre-Inc. AM Prep. Pass -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to prepare loops for pre-increment addressing
// modes. Additional PHIs are created for loop induction variables used by
// load/store instructions so that the pre-increment forms can be used.
// Generically, this means transforming loops like this:
//   for (int i = 0; i < n; ++i)
//     array[i] = c;
// to look like this:
//   T *p = array[-1];
//   for (int i = 0; i < n; ++i)
//     *++p = c;
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "ppc-loop-preinc-prep"
#include "PPC.h"
#include "PPCTargetMachine.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

// By default, we limit this to creating 16 PHIs (which is a little over half
// of the allocatable register set).
static cl::opt<unsigned> MaxVars("ppc-preinc-prep-max-vars",
  cl::Hidden, cl::init(16),
  cl::desc("Potential PHI threshold for PPC preinc loop prep"));

namespace llvm {
  void initializePPCLoopPreIncPrepPass(PassRegistry&);
}

namespace {

  class PPCLoopPreIncPrep : public FunctionPass {
  public:
    static char ID; // Pass ID, replacement for typeid
    PPCLoopPreIncPrep() : FunctionPass(ID), TM(nullptr) {
      initializePPCLoopPreIncPrepPass(*PassRegistry::getPassRegistry());
    }
    PPCLoopPreIncPrep(PPCTargetMachine &TM) : FunctionPass(ID), TM(&TM) {
      initializePPCLoopPreIncPrepPass(*PassRegistry::getPassRegistry());
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addRequired<LoopInfoWrapperPass>();
      AU.addPreserved<LoopInfoWrapperPass>();
      AU.addRequired<ScalarEvolution>();
    }

    bool runOnFunction(Function &F) override;

    bool runOnLoop(Loop *L);
    void simplifyLoopLatch(Loop *L);
    bool rotateLoop(Loop *L);

  private:
    PPCTargetMachine *TM;
    LoopInfo *LI;
    ScalarEvolution *SE;
    const DataLayout *DL;
  };
}

char PPCLoopPreIncPrep::ID = 0;
const char *name = "Prepare loop for pre-inc. addressing modes";
INITIALIZE_PASS_BEGIN(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false)

FunctionPass *llvm::createPPCLoopPreIncPrepPass(PPCTargetMachine &TM) {
  return new PPCLoopPreIncPrep(TM);
}

namespace {
  struct SCEVLess : std::binary_function<const SCEV *, const SCEV *, bool>
  {
    SCEVLess(ScalarEvolution *SE) : SE(SE) {}

    bool operator() (const SCEV *X, const SCEV *Y) const {
      const SCEV *Diff = SE->getMinusSCEV(X, Y);
      return cast<SCEVConstant>(Diff)->getValue()->getSExtValue() < 0;
    }

  protected:
    ScalarEvolution *SE;
  };
}

static bool IsPtrInBounds(Value *BasePtr) {
  Value *StrippedBasePtr = BasePtr;
  while (BitCastInst *BC = dyn_cast<BitCastInst>(StrippedBasePtr))
    StrippedBasePtr = BC->getOperand(0);
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(StrippedBasePtr))
    return GEP->isInBounds();

  return false;
}

static Value *GetPointerOperand(Value *MemI) {
  if (LoadInst *LMemI = dyn_cast<LoadInst>(MemI)) {
    return LMemI->getPointerOperand();
  } else if (StoreInst *SMemI = dyn_cast<StoreInst>(MemI)) {
    return SMemI->getPointerOperand();
  } else if (IntrinsicInst *IMemI = dyn_cast<IntrinsicInst>(MemI)) {
    if (IMemI->getIntrinsicID() == Intrinsic::prefetch)
      return IMemI->getArgOperand(0);
  }

  return 0;
}

bool PPCLoopPreIncPrep::runOnFunction(Function &F) {
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  SE = &getAnalysis<ScalarEvolution>();

  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : 0;

  bool MadeChange = false;

  for (LoopInfo::iterator I = LI->begin(), E = LI->end();
       I != E; ++I) {
    Loop *L = *I;
    MadeChange |= runOnLoop(L);
  }

  return MadeChange;
}

bool PPCLoopPreIncPrep::runOnLoop(Loop *L) {
  bool MadeChange = false;

  if (!DL)
    return MadeChange;

  // Only prep. the inner-most loop
  if (!L->empty())
    return MadeChange;

  BasicBlock *Header = L->getHeader();
  BasicBlock *LoopPredecessor = L->getLoopPredecessor();
  if (!LoopPredecessor)
    return MadeChange;

  const PPCSubtarget *ST =
    TM ? TM->getSubtargetImpl(*Header->getParent()) : nullptr;

  unsigned HeaderLoopPredCount = 0;
  for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
       PI != PE; ++PI) {
    ++HeaderLoopPredCount;
  }

  // Collect buckets of comparable addresses used by loads and stores.
  typedef std::multimap<const SCEV *, Instruction *, SCEVLess> Bucket;
  SmallVector<Bucket, 16> Buckets;
  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I) {
    for (BasicBlock::iterator J = (*I)->begin(), JE = (*I)->end();
         J != JE; ++J) {
      Value *PtrValue;
      Instruction *MemI;

      if (LoadInst *LMemI = dyn_cast<LoadInst>(J)) {
        MemI = LMemI;
        PtrValue = LMemI->getPointerOperand();
      } else if (StoreInst *SMemI = dyn_cast<StoreInst>(J)) {
        MemI = SMemI;
        PtrValue = SMemI->getPointerOperand();
      } else if (IntrinsicInst *IMemI = dyn_cast<IntrinsicInst>(J)) {
        if (IMemI->getIntrinsicID() == Intrinsic::prefetch) {
          MemI = IMemI;
          PtrValue = IMemI->getArgOperand(0);
        } else continue;
      } else continue;

      unsigned PtrAddrSpace = PtrValue->getType()->getPointerAddressSpace();
      if (PtrAddrSpace)
        continue;

      // There are no update forms for Altivec vector load/stores.
      if (ST && ST->hasAltivec() &&
          PtrValue->getType()->getPointerElementType()->isVectorTy())
        continue;

      if (L->isLoopInvariant(PtrValue))
        continue;

      const SCEV *LSCEV = SE->getSCEV(PtrValue);
      if (!isa<SCEVAddRecExpr>(LSCEV))
        continue;

      bool FoundBucket = false;
      for (unsigned i = 0, e = Buckets.size(); i != e; ++i)
        for (Bucket::iterator K = Buckets[i].begin(), KE = Buckets[i].end();
             K != KE; ++K) {
          const SCEV *Diff = SE->getMinusSCEV(K->first, LSCEV);
          if (isa<SCEVConstant>(Diff)) {
            Buckets[i].insert(std::make_pair(LSCEV, MemI));
            FoundBucket = true;
            break;
          }
        }

      if (!FoundBucket) {
        Buckets.push_back(Bucket(SCEVLess(SE)));
        Buckets[Buckets.size()-1].insert(std::make_pair(LSCEV, MemI));
      }
    }
  }

  if (Buckets.size() > MaxVars)
    return MadeChange;

  SmallSet<BasicBlock *, 16> BBChanged;
  for (unsigned i = 0, e = Buckets.size(); i != e; ++i) {
    // The base address of each bucket is transformed into a phi and the others
    // are rewritten as offsets of that variable.

    const SCEVAddRecExpr *BasePtrSCEV =
      cast<SCEVAddRecExpr>(Buckets[i].begin()->first);
    if (!BasePtrSCEV->isAffine())
      continue;

    Instruction *MemI = Buckets[i].begin()->second;
    Value *BasePtr = GetPointerOperand(MemI);
    assert(BasePtr && "No pointer operand");

    Type *I8PtrTy = Type::getInt8PtrTy(MemI->getParent()->getContext(),
      BasePtr->getType()->getPointerAddressSpace());

    const SCEV *BasePtrStartSCEV = BasePtrSCEV->getStart();
    if (!SE->isLoopInvariant(BasePtrStartSCEV, L))
      continue;

    const SCEVConstant *BasePtrIncSCEV =
      dyn_cast<SCEVConstant>(BasePtrSCEV->getStepRecurrence(*SE));
    if (!BasePtrIncSCEV)
      continue;
    BasePtrStartSCEV = SE->getMinusSCEV(BasePtrStartSCEV, BasePtrIncSCEV);
    if (!isSafeToExpand(BasePtrStartSCEV, *SE))
      continue;

    PHINode *NewPHI = PHINode::Create(I8PtrTy, HeaderLoopPredCount,
      MemI->hasName() ? MemI->getName() + ".phi" : "",
      Header->getFirstNonPHI());

    SCEVExpander SCEVE(*SE, "pistart");
    Value *BasePtrStart = SCEVE.expandCodeFor(BasePtrStartSCEV, I8PtrTy,
      LoopPredecessor->getTerminator());

    // Note that LoopPredecessor might occur in the predecessor list multiple
    // times, and we need to add it the right number of times.
    for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
         PI != PE; ++PI) {
      if (*PI != LoopPredecessor)
        continue;

      NewPHI->addIncoming(BasePtrStart, LoopPredecessor);
    }

    Instruction *InsPoint = Header->getFirstInsertionPt();
    GetElementPtrInst *PtrInc =
      GetElementPtrInst::Create(NewPHI, BasePtrIncSCEV->getValue(),
        MemI->hasName() ? MemI->getName() + ".inc" : "", InsPoint);
    PtrInc->setIsInBounds(IsPtrInBounds(BasePtr));
    for (pred_iterator PI = pred_begin(Header), PE = pred_end(Header);
         PI != PE; ++PI) {
      if (*PI == LoopPredecessor)
        continue;

      NewPHI->addIncoming(PtrInc, *PI);
    }

    Instruction *NewBasePtr;
    if (PtrInc->getType() != BasePtr->getType())
      NewBasePtr = new BitCastInst(PtrInc, BasePtr->getType(),
        PtrInc->hasName() ? PtrInc->getName() + ".cast" : "", InsPoint);
    else
      NewBasePtr = PtrInc;

    if (Instruction *IDel = dyn_cast<Instruction>(BasePtr))
      BBChanged.insert(IDel->getParent());
    BasePtr->replaceAllUsesWith(NewBasePtr);
    RecursivelyDeleteTriviallyDeadInstructions(BasePtr);

    Value *LastNewPtr = NewBasePtr;
    for (Bucket::iterator I = std::next(Buckets[i].begin()),
         IE = Buckets[i].end(); I != IE; ++I) {
      Value *Ptr = GetPointerOperand(I->second);
      assert(Ptr && "No pointer operand");
      if (Ptr == LastNewPtr)
        continue;

      Instruction *RealNewPtr;
      const SCEVConstant *Diff =
        cast<SCEVConstant>(SE->getMinusSCEV(I->first, BasePtrSCEV));
      if (Diff->isZero()) {
        RealNewPtr = NewBasePtr;
      } else {
        Instruction *PtrIP = dyn_cast<Instruction>(Ptr);
        if (PtrIP && isa<Instruction>(NewBasePtr) &&
            cast<Instruction>(NewBasePtr)->getParent() == PtrIP->getParent())
          PtrIP = 0;
        else if (isa<PHINode>(PtrIP))
          PtrIP = PtrIP->getParent()->getFirstInsertionPt();
        else if (!PtrIP)
          PtrIP = I->second;

        GetElementPtrInst *NewPtr =
          GetElementPtrInst::Create(PtrInc, Diff->getValue(),
            I->second->hasName() ? I->second->getName() + ".off" : "", PtrIP);
        if (!PtrIP)
          NewPtr->insertAfter(cast<Instruction>(PtrInc));
        NewPtr->setIsInBounds(IsPtrInBounds(Ptr));
        RealNewPtr = NewPtr;
      }

      if (Instruction *IDel = dyn_cast<Instruction>(Ptr))
        BBChanged.insert(IDel->getParent());

      Instruction *ReplNewPtr;
      if (Ptr->getType() != RealNewPtr->getType()) {
        ReplNewPtr = new BitCastInst(RealNewPtr, Ptr->getType(),
          Ptr->hasName() ? Ptr->getName() + ".cast" : "");
        ReplNewPtr->insertAfter(RealNewPtr);
      } else
        ReplNewPtr = RealNewPtr;

      Ptr->replaceAllUsesWith(ReplNewPtr);
      RecursivelyDeleteTriviallyDeadInstructions(Ptr);

      LastNewPtr = RealNewPtr;
    }

    MadeChange = true;
  }

  for (Loop::block_iterator I = L->block_begin(), IE = L->block_end();
       I != IE; ++I) {
    if (BBChanged.count(*I))
      DeleteDeadPHIs(*I);
  }

  return MadeChange;
}
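The heart of runOnLoop above is the bucketing step: pointer SCEVs are grouped whenever their difference is a compile-time constant, and the lowest member of each bucket (SCEVLess keeps them sorted) supplies the start value for the new PHI. A self-contained toy sketch of that grouping idea (illustration only, not LLVM code; it models SCEVs as a symbolic base plus an invented integer byte offset):

  #include <cstdio>
  #include <map>
  #include <string>
  #include <vector>

  int main() {
    // Each access is modeled as a symbolic base plus a constant byte offset;
    // in the pass, the analogous test is whether the SCEV difference of two
    // pointers is a SCEVConstant.
    struct Access { const char *Name; const char *Base; long Offset; };
    std::vector<Access> Accesses = {
        {"a[i]", "a", 0}, {"a[i+1]", "a", 4}, {"b[i]", "b", 0}};

    // One bucket per base; within a bucket, entries are ordered by offset
    // (mirroring SCEVLess), so begin() is the lowest address and plays the
    // role of the base that is turned into the new PHI.
    std::map<std::string, std::multimap<long, const char *>> Buckets;
    for (const Access &A : Accesses)
      Buckets[A.Base].emplace(A.Offset, A.Name);

    for (const auto &B : Buckets) {
      std::printf("bucket for base %s (PHI base at offset %ld):\n",
                  B.first.c_str(), B.second.begin()->first);
      for (const auto &E : B.second)
        std::printf("  offset %+ld -> %s\n", E.first, E.second);
    }
    return 0;
  }

Here a[i] and a[i+1] share one bucket (constant difference of 4 bytes) while b[i] starts its own, matching how the pass would create one new PHI per bucket.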
lib/Target/PowerPC/PPCTargetMachine.cpp:
@@ -30,6 +30,10 @@ static cl::
opt<bool> DisableCTRLoops("disable-ppc-ctrloops", cl::Hidden,
                        cl::desc("Disable CTR loops for PPC"));

static cl::
opt<bool> DisablePreIncPrep("disable-ppc-preinc-prep", cl::Hidden,
                        cl::desc("Disable PPC loop preinc prep"));

static cl::opt<bool>
VSXFMAMutateEarly("schedule-ppc-vsx-fma-mutation-early",
  cl::Hidden, cl::desc("Schedule VSX FMA instruction mutation early"));

@@ -231,6 +235,9 @@ void PPCPassConfig::addIRPasses() {
}

bool PPCPassConfig::addPreISel() {
  if (!DisablePreIncPrep && getOptLevel() != CodeGenOpt::None)
    addPass(createPPCLoopPreIncPrepPass(getPPCTargetMachine()));

  if (!DisableCTRLoops && getOptLevel() != CodeGenOpt::None)
    addPass(createPPCCTRLoops(getPPCTargetMachine()));
@@ -1,4 +1,5 @@
; RUN: llc -mcpu=pwr7 < %s | FileCheck %s
; RUN: llc -mcpu=pwr7 -disable-ppc-preinc-prep < %s | FileCheck %s
; RUN: llc -mcpu=pwr7 < %s | FileCheck %s -check-prefix=PIP
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"

@@ -21,6 +22,14 @@ entry:
; CHECK-DAG: lfsx {{[0-9]+}}, [[REG2]],
; CHECK: blr

; PIP-LABEL: @foo
; PIP: addi [[REG1:[0-9]+]], 1,
; PIP: addi [[REG2:[0-9]+]], 1,
; PIP: %for.body.i
; PIP-DAG: lfsu {{[0-9]+}}, 4([[REG1]])
; PIP-DAG: lfsu {{[0-9]+}}, 4([[REG2]])
; PIP: blr

for.body.i: ; preds = %for.body.i.preheader, %for.body.i
  %accumulator.09.i = phi double [ %add.i, %for.body.i ], [ 0.000000e+00, %entry ]
  %i.08.i = phi i64 [ %inc.i, %for.body.i ], [ 0, %entry ]
@@ -44,6 +44,9 @@ entry:
; GENERIC-NOT: .align
; BASIC: .align 4
; PWR: .align 4
; GENERIC: lwzu
; BASIC: lwzu
; PWR: lwzu
; GENERIC: bdnz
; BASIC: bdnz
; PWR: bdnz

@@ -57,11 +60,13 @@ vector.body: ; preds = %vector.body, %entry
  %3 = load i32* %1, align 4
  %4 = add nsw i32 %2, 4
  %5 = add nsw i32 %3, 4
  store i32 %4, i32* %0, align 4
  store i32 %5, i32* %1, align 4
  %6 = mul nsw i32 %4, 3
  %7 = mul nsw i32 %5, 3
  store i32 %6, i32* %0, align 4
  store i32 %7, i32* %1, align 4
  %index.next = add i64 %index, 2
  %6 = icmp eq i64 %index.next, 2048
  br i1 %6, label %for.end, label %vector.body
  %8 = icmp eq i64 %index.next, 2048
  br i1 %8, label %for.end, label %vector.body

for.end: ; preds = %vector.body
  ret void

@@ -90,7 +95,8 @@ for.body: ; preds = %for.body, %entry
  %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
  %0 = load i32* %arrayidx, align 4
  %add = add nsw i32 %0, 4
  store i32 %add, i32* %arrayidx, align 4
  %mul = mul nsw i32 %add, 3
  store i32 %mul, i32* %arrayidx, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv.next, 2048
  br i1 %exitcond, label %for.end, label %for.body