This patch corrects a problem whereby the optimization to use a faster divide
instruction (for Intel Atom) was not being applied to IR produced by Clang,
because Clang builds its types in its own LLVMContext rather than in the
default (global) context, so pointer-keyed lookups in the BypassTypeMap never
matched.
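
(For background, not part of the patch: LLVM uniques types per LLVMContext, so
an i32 created in Clang's context is a different Type* than the i32 in the
global context that the BypassTypeMap is keyed on. A minimal standalone sketch
of the mismatch, using the header paths and the since-removed getGlobalContext()
of this LLVM era; this is an illustration, not code from the tree:)

  #include "llvm/ADT/DenseMap.h"
  #include "llvm/DerivedTypes.h"
  #include "llvm/LLVMContext.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    // The pass's map is keyed on types created in the default (global) context.
    DenseMap<Type *, Type *> BypassTypeMap;
    BypassTypeMap[IntegerType::get(getGlobalContext(), 32)] =
        IntegerType::get(getGlobalContext(), 8);

    // Clang builds its module in its own context, so its i32 is a different
    // uniqued Type* and the pointer-keyed lookup misses.
    LLVMContext ClangCtx;
    Type *ClangI32 = IntegerType::get(ClangCtx, 32);
    assert(BypassTypeMap.find(ClangI32) == BypassTypeMap.end());
    return 0;
  }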

It fixes the problem by recreating each div/rem instruction's integer type in
the global context, so that it can be compared against the types in the
BypassTypeMap, and by translating the matching bypass type back into the
instruction's own context.
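
(Sketched below as a standalone helper for clarity; the name lookupBypassType
is hypothetical, and in the patch the same logic sits inline in
bypassSlowDivision, as the diff further down shows:)

  #include "llvm/ADT/DenseMap.h"
  #include "llvm/DerivedTypes.h"
  #include "llvm/Instruction.h"
  #include "llvm/LLVMContext.h"
  #include "llvm/Support/Casting.h"

  using namespace llvm;

  // Hypothetical helper mirroring the patch's approach: re-create the
  // instruction's integer type in the global context (where the map's keys
  // live), look it up there, then bring the bypass type back into the
  // instruction's own context. Returns 0 if the div/rem is not bypassed.
  static IntegerType *lookupBypassType(Instruction *J,
                                       const DenseMap<Type *, Type *> &Map) {
    IntegerType *T = dyn_cast<IntegerType>(J->getType());
    if (!T)   // vector or other non-integer div/rem: leave it alone
      return 0;
    IntegerType *GT = IntegerType::get(getGlobalContext(), T->getBitWidth());
    DenseMap<Type *, Type *>::const_iterator BI = Map.find(GT);
    if (BI == Map.end())
      return 0;
    IntegerType *GBT = cast<IntegerType>(BI->second);
    return IntegerType::get(J->getContext(), GBT->getBitWidth());
  }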

Tests for this will be done as a separate patch to Clang.

Patch by Tyler Nowicki.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165126 91177308-0d34-0410-b5e6-96231b3b80d8
Preston Gurd 2012-10-03 16:11:44 +00:00
parent c653de6c0f
commit fcf0628d93


@@ -238,14 +238,24 @@ bool llvm::bypassSlowDivision(Function &F,
     if (!UseDivOp && !UseRemOp)
       continue;
-    // Continue if div/rem type is not bypassed
-    DenseMap<Type *, Type *>::const_iterator BT =
-      BypassTypeMap.find(J->getType());
-    if (BT == BypassTypeMap.end())
+    // Skip division on vector types, only optimize integer instructions
+    if (!J->getType()->isIntegerTy())
       continue;
-    IntegerType *BypassType = cast<IntegerType>(BT->second);
-    MadeChange |= reuseOrInsertFastDiv(F, I, J, BypassType, UseDivOp,
+    // Get same type in global context
+    IntegerType *T = cast<IntegerType>(J->getType());
+    IntegerType *GT = IntegerType::get(getGlobalContext(), T->getBitWidth());
+    // Continue if div/rem type is not bypassed
+    DenseMap<Type *, Type *>::const_iterator BI = BypassTypeMap.find(GT);
+    if (BI == BypassTypeMap.end())
+      continue;
+    // Get the bypass type in the original context
+    IntegerType *GBT = cast<IntegerType>(BI->second);
+    IntegerType *BT = IntegerType::get(J->getContext(), GBT->getBitWidth());
+    MadeChange |= reuseOrInsertFastDiv(F, I, J, BT, UseDivOp,
                                        UseSignedOp, DivCache);
   }