Revert r111568 to unbreak clang self-host.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@111571 91177308-0d34-0410-b5e6-96231b3b80d8
@@ -14,13 +14,11 @@
 #include "InstCombine.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Analysis/Loads.h"
-#include "llvm/Support/PatternMatch.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/ADT/Statistic.h"
 using namespace llvm;
-using namespace PatternMatch;
 
 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
 
@@ -475,49 +473,6 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
 
   if (SI.isVolatile()) return 0;  // Don't hack volatile stores.
 
-  // Attempt to narrow sequences where we load a wide value, perform bitmasks
-  // that only affect the low bits of it, and then store it back.  This
-  // typically arises from bitfield initializers in C++.
-  ConstantInt *CI1 = 0, *CI2 = 0;
-  Value *Ld = 0;
-  if (getTargetData() &&
-      match(SI.getValueOperand(),
-            m_And(m_Or(m_Value(Ld), m_ConstantInt(CI1)), m_ConstantInt(CI2))) &&
-      isa<LoadInst>(Ld) &&
-      equivalentAddressValues(cast<LoadInst>(Ld)->getPointerOperand(), Ptr)) {
-    APInt OrMask = CI1->getValue();
-    APInt AndMask = CI2->getValue();
-
-    // Compute the prefix of the value that is unmodified by the bitmasking.
-    unsigned LeadingAndOnes = AndMask.countLeadingOnes();
-    unsigned LeadingOrZeros = OrMask.countLeadingZeros();
-    unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros);
-    uint64_t NewWidth = AndMask.getBitWidth() - Prefix;
-    if (!isPowerOf2_64(NewWidth)) NewWidth = NextPowerOf2(NewWidth);
-
-    // If we can find a power-of-2 prefix (and if the values we're working with
-    // are themselves POT widths), then we can narrow the store.  We rely on
-    // later iterations of instcombine to propagate the demanded bits to narrow
-    // the other computations in the chain.
-    if (NewWidth < AndMask.getBitWidth() &&
-        isPowerOf2_64(AndMask.getBitWidth())) {
-      const Type *NewType = IntegerType::get(Ptr->getContext(), NewWidth);
-      const Type *NewPtrType = PointerType::getUnqual(NewType);
-
-      Value *NewVal = Builder->CreateTrunc(SI.getValueOperand(), NewType);
-      Value *NewPtr = Builder->CreateBitCast(Ptr, NewPtrType);
-
-      // On big endian targets, we need to offset from the original pointer
-      // in order to store to the low-bit suffix.
-      if (getTargetData()->isBigEndian()) {
-        uint64_t GEPOffset = (AndMask.getBitWidth() - NewWidth) / 8;
-        NewPtr = Builder->CreateConstGEP1_64(NewPtr, GEPOffset);
-      }
-
-      return new StoreInst(NewVal, NewPtr);
-    }
-  }
-
   // store X, null -> turns into 'unreachable' in SimplifyCFG
   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
     if (!isa<UndefValue>(Val)) {
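For context, the block removed above implemented a store-narrowing transform: when a wide value is loaded, only its low bits are rewritten through an or/and mask pair, and the whole value is stored back (the pattern C++ bitfield initializers tend to produce), the store was shrunk to just the modified suffix. The sketch below is illustrative only and not part of this commit; the struct, mask values, and widths are assumed for the example, and the exact IR depends on the frontend and target.

// Illustrative sketch (assumed example, not from this commit): the kind of
// C++ bitfield write the reverted narrowing targeted, with the mask
// arithmetic the removed block would have performed on it.
#include <cstdint>

struct Flags {
  uint32_t Lo : 8;   // the only bits the write below changes
  uint32_t Hi : 24;  // untouched high bits, the "unmodified prefix"
};

void setLo(Flags *F) {
  // The removed code matched a store whose value operand has the form
  // ((old | OrMask) & AndMask), where 'old' is a load from the same address.
  // Assume, for this write, OrMask = 0x00000005 and AndMask = 0xFFFFFF05:
  //   AndMask.countLeadingOnes()  = 24
  //   OrMask.countLeadingZeros()  = 29
  //   Prefix   = min(24, 29)      = 24   // high bits that pass through
  //   NewWidth = 32 - 24          = 8    // already a power of two
  // Since NewWidth (8) < 32 and 32 is itself a power of two, the i32 store
  // could be replaced by an i8 store of the truncated value, offset by
  // (32 - 8) / 8 = 3 bytes on a big-endian target so it hits the low byte.
  F->Lo = 5;
}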