Re-apply r111568 with a fix for the clang self-host.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@111665 91177308-0d34-0410-b5e6-96231b3b80d8
commit a4cba04a03
parent e82e7700ea
@@ -14,11 +14,13 @@
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
using namespace PatternMatch;

STATISTIC(NumDeadStore, "Number of dead stores eliminated");

@@ -473,6 +475,51 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {

  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.

  // Attempt to narrow sequences where we load a wide value, perform bitmasks
  // that only affect the low bits of it, and then store it back.  This
  // typically arises from bitfield initializers in C++.
  ConstantInt *CI1 = 0, *CI2 = 0;
  Value *Ld = 0;
  if (getTargetData() &&
      match(SI.getValueOperand(),
            m_And(m_Or(m_Value(Ld), m_ConstantInt(CI1)), m_ConstantInt(CI2))) &&
      isa<LoadInst>(Ld) &&
      equivalentAddressValues(cast<LoadInst>(Ld)->getPointerOperand(), Ptr)) {
    APInt OrMask = CI1->getValue();
    APInt AndMask = CI2->getValue();

    // Compute the prefix of the value that is unmodified by the bitmasking.
    unsigned LeadingAndOnes = AndMask.countLeadingOnes();
    unsigned LeadingOrZeros = OrMask.countLeadingZeros();
    unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros);
    uint64_t NewWidth = AndMask.getBitWidth() - Prefix;
    while (NewWidth < AndMask.getBitWidth() &&
           getTargetData()->isIllegalInteger(NewWidth))
      NewWidth = NextPowerOf2(NewWidth);

    // If we can find a power-of-2 prefix (and if the values we're working with
    // are themselves POT widths), then we can narrow the store.  We rely on
    // later iterations of instcombine to propagate the demanded bits to narrow
    // the other computations in the chain.
    if (NewWidth < AndMask.getBitWidth() &&
        getTargetData()->isLegalInteger(NewWidth)) {
      const Type *NewType = IntegerType::get(Ptr->getContext(), NewWidth);
      const Type *NewPtrType = PointerType::getUnqual(NewType);

      Value *NewVal = Builder->CreateTrunc(SI.getValueOperand(), NewType);
      Value *NewPtr = Builder->CreateBitCast(Ptr, NewPtrType);

      // On big endian targets, we need to offset from the original pointer
      // in order to store to the low-bit suffix.
      if (getTargetData()->isBigEndian()) {
        uint64_t GEPOffset = (AndMask.getBitWidth() - NewWidth) / 8;
        NewPtr = Builder->CreateConstGEP1_64(NewPtr, GEPOffset);
      }

      return new StoreInst(NewVal, NewPtr);
    }
  }

  // store X, null -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
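
For orientation, here is a minimal hand-written IR sequence of the shape the new code matches; the value names (%p, %v, %o, %m) are made up and the constants mirror the test case added below, so treat it as an illustration rather than compiler output:

    %v = load i32* %p
    %o = or i32 %v, 8        ; OrMask  = 0x00000008, 28 leading zeros
    %m = and i32 %o, -57     ; AndMask = 0xFFFFFFC7, 26 leading ones
    store i32 %m, i32* %p

Here Prefix = min(26, 28) = 26, so NewWidth starts at 32 - 26 = 6 and is rounded up to the next legal integer width, 8. The store can then be rewritten as an i8 store of the truncated value through the pointer bitcast to i8*; on a big-endian target the pointer would additionally be advanced by (32 - 8) / 8 = 3 bytes so that the low-bit suffix is what gets overwritten.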
test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll (new file, 21 lines)
@@ -0,0 +1,21 @@
; RUN: opt -S -instcombine %s | not grep and
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

%class.A = type { i8, [3 x i8] }

define void @_ZN1AC2Ev(%class.A* %this) nounwind ssp align 2 {
entry:
  %0 = bitcast %class.A* %this to i32*  ; <i32*> [#uses=5]
  %1 = load i32* %0, align 4            ; <i32> [#uses=1]
  %2 = and i32 %1, -8                   ; <i32> [#uses=2]
  store i32 %2, i32* %0, align 4
  %3 = and i32 %2, -57                  ; <i32> [#uses=1]
  %4 = or i32 %3, 8                     ; <i32> [#uses=2]
  store i32 %4, i32* %0, align 4
  %5 = and i32 %4, -65                  ; <i32> [#uses=2]
  store i32 %5, i32* %0, align 4
  %6 = and i32 %5, -129                 ; <i32> [#uses=1]
  store i32 %6, i32* %0, align 4
  ret void
}
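
The RUN line only asserts that no 'and' instruction survives instcombine. Expanded by hand (with LLVM's opt and not utilities on PATH, and %s replaced by the file path), the check is roughly:

    opt -S -instcombine test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll | not grep and

The expectation, hedged since the exact output depends on instcombine's iteration order, is that each masked i32 store into %class.A gets narrowed to an i8 store, after which the demanded bits of the stored values shrink to the low byte and later iterations simplify away the remaining and/or chain, leaving no 'and' behind.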