Change a bunch of isVolatile() checks to isSimple() checks, so that atomic loads and stores are rejected as well.

No tests; these changes aren't really interesting on their own, since the logic is the same for volatile and atomic accesses.

I believe this completes all of the changes necessary for the optimizer to handle loads and stores correctly.  I'm going to try to come up with some additional testing, though.
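For context (this snippet is not part of the diff): isSimple() subsumes the old volatility test, so !LI->isSimple() rejects both volatile and atomic accesses. A minimal sketch of the equivalence, using an illustrative helper name and the 2011-era header path:

    #include "llvm/Instructions.h"

    // Illustrative helper, not in the tree: mirrors what LoadInst::isSimple()
    // computes.  A "simple" access is neither volatile nor atomic, i.e. the
    // only kind these optimizations may freely reorder or eliminate.
    static bool isSimpleLoad(const llvm::LoadInst *LI) {
      return !LI->isVolatile() && !LI->isAtomic();
    }

The same holds for StoreInst, which is why the store checks below flip from !SI->isVolatile() to SI->isSimple().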



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@139533 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Eli Friedman
Date:   2011-09-12 20:23:13 +00:00
Parent: a073795023
Commit: 2bc3d52b9a

5 changed files with 19 additions and 19 deletions

lib/Transforms/Scalar/EarlyCSE.cpp

@@ -357,7 +357,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
     // If this is a non-volatile load, process it.
     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
       // Ignore volatile loads.
-      if (LI->isVolatile()) {
+      if (!LI->isSimple()) {
         LastStore = 0;
         continue;
       }

@@ -437,7 +437,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
           std::pair<Value*, unsigned>(SI->getValueOperand(), CurrentGeneration));
       // Remember that this was the last store we saw for DSE.
-      if (!SI->isVolatile())
+      if (SI->isSimple())
         LastStore = SI;
     }
   }

lib/Transforms/Scalar/JumpThreading.cpp

@@ -811,8 +811,8 @@ bool JumpThreading::ProcessBlock(BasicBlock *BB) {
 /// important optimization that encourages jump threading, and needs to be run
 /// interlaced with other jump threading tasks.
 bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
-  // Don't hack volatile loads.
-  if (LI->isVolatile()) return false;
+  // Don't hack volatile/atomic loads.
+  if (!LI->isSimple()) return false;
   // If the load is defined in a block with exactly one predecessor, it can't be
   // partially redundant.

lib/Transforms/Scalar/LoopIdiomRecognize.cpp

@@ -267,7 +267,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
 /// processLoopStore - See if this store can be promoted to a memset or memcpy.
 bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
-  if (SI->isVolatile()) return false;
+  if (!SI->isSimple()) return false;
   Value *StoredVal = SI->getValueOperand();
   Value *StorePtr = SI->getPointerOperand();

@@ -314,7 +314,7 @@ bool LoopIdiomRecognize::processLoopStore(StoreInst *SI, const SCEV *BECount) {
     const SCEVAddRecExpr *LoadEv =
       dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getOperand(0)));
     if (LoadEv && LoadEv->getLoop() == CurLoop && LoadEv->isAffine() &&
-        StoreEv->getOperand(1) == LoadEv->getOperand(1) && !LI->isVolatile())
+        StoreEv->getOperand(1) == LoadEv->getOperand(1) && LI->isSimple())
       if (processLoopStoreOfLoopLoad(SI, StoreSize, StoreEv, LoadEv, BECount))
         return true;
   }

lib/Transforms/Scalar/ObjCARC.cpp

@@ -3474,7 +3474,7 @@ ObjCARCContract::ContractAutorelease(Function &F, Instruction *Autorelease,
 void ObjCARCContract::ContractRelease(Instruction *Release,
                                       inst_iterator &Iter) {
   LoadInst *Load = dyn_cast<LoadInst>(GetObjCArg(Release));
-  if (!Load || Load->isVolatile()) return;
+  if (!Load || !Load->isSimple()) return;
   // For now, require everything to be in one basic block.
   BasicBlock *BB = Release->getParent();

@@ -3490,7 +3490,7 @@ void ObjCARCContract::ContractRelease(Instruction *Release,
            !(AA->getModRefInfo(I, Loc) & AliasAnalysis::Mod)))
     ++I;
   StoreInst *Store = dyn_cast<StoreInst>(I);
-  if (!Store || Store->isVolatile()) return;
+  if (!Store || !Store->isSimple()) return;
   if (Store->getPointerOperand() != Loc.Ptr) return;
   Value *New = StripPointerCastsAndObjCCalls(Store->getValueOperand());

lib/Transforms/Scalar/ScalarReplAggregates.cpp

@@ -489,7 +489,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
     if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
       // Don't break volatile loads.
-      if (LI->isVolatile())
+      if (!LI->isSimple())
         return false;
       // Don't touch MMX operations.
       if (LI->getType()->isX86_MMXTy())

@@ -501,7 +501,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
     if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
       // Storing the pointer, not into the value?
-      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
+      if (SI->getOperand(0) == V || !SI->isSimple()) return false;
       // Don't touch MMX operations.
       if (SI->getOperand(0)->getType()->isX86_MMXTy())
         return false;

@@ -1224,7 +1224,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
   for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
        UI != UE; ++UI) {
     LoadInst *LI = dyn_cast<LoadInst>(*UI);
-    if (LI == 0 || LI->isVolatile()) return false;
+    if (LI == 0 || !LI->isSimple()) return false;
     // Both operands to the select need to be dereferencable, either absolutely
     // (e.g. allocas) or at this point because we can see other accesses to it.

@@ -1265,7 +1265,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
   for (Value::use_iterator UI = PN->use_begin(), UE = PN->use_end();
        UI != UE; ++UI) {
     LoadInst *LI = dyn_cast<LoadInst>(*UI);
-    if (LI == 0 || LI->isVolatile()) return false;
+    if (LI == 0 || !LI->isSimple()) return false;
     // For now we only allow loads in the same block as the PHI.  This is a
     // common case that happens when instcombine merges two loads through a PHI.

@@ -1323,13 +1323,13 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
        UI != UE; ++UI) {
     User *U = *UI;
     if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
-      if (LI->isVolatile())
+      if (!LI->isSimple())
         return false;
       continue;
     }
     if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
-      if (SI->getOperand(0) == AI || SI->isVolatile())
+      if (SI->getOperand(0) == AI || !SI->isSimple())
         return false;   // Don't allow a store OF the AI, only INTO the AI.
       continue;
     }

@@ -1717,7 +1717,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
                         UI.getOperandNo() == 0, Info, MI,
                         true /*AllowWholeAccess*/);
     } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
-      if (LI->isVolatile())
+      if (!LI->isSimple())
         return MarkUnsafe(Info, User);
       Type *LIType = LI->getType();
       isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),

@@ -1726,7 +1726,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
     } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
       // Store is ok if storing INTO the pointer, not storing the pointer
-      if (SI->isVolatile() || SI->getOperand(0) == I)
+      if (!SI->isSimple() || SI->getOperand(0) == I)
         return MarkUnsafe(Info, User);
       Type *SIType = SI->getOperand(0)->getType();

@@ -1776,7 +1776,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
       return MarkUnsafe(Info, User);
     isSafePHISelectUseForScalarRepl(GEPI, Offset, Info);
   } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
-    if (LI->isVolatile())
+    if (!LI->isSimple())
       return MarkUnsafe(Info, User);
     Type *LIType = LI->getType();
     isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),

@@ -1785,7 +1785,7 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
   } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
     // Store is ok if storing INTO the pointer, not storing the pointer
-    if (SI->isVolatile() || SI->getOperand(0) == I)
+    if (!SI->isSimple() || SI->getOperand(0) == I)
       return MarkUnsafe(Info, User);
     Type *SIType = SI->getOperand(0)->getType();

@@ -2688,7 +2688,7 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
   if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
     // Ignore non-volatile loads, they are always ok.
-    if (LI->isVolatile()) return false;
+    if (!LI->isSimple()) return false;
     continue;
   }