Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-11-01 15:11:24 +00:00)
Switch this code to use hash_combine_range rather than incremental calls to hash_combine. One of the interfaces could already do this, and the other can just use a small buffer. This is a much more efficient way to use the hash_combine interface, although I don't have any particular benchmark where this code was hot, so I can't measure much of an impact. It at least doesn't slow anything down.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@152200 91177308-0d34-0410-b5e6-96231b3b80d8
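For context, here is a minimal, self-contained sketch of the two idioms the message contrasts, written against llvm/ADT/Hashing.h. The Key struct and its fields are invented for illustration and do not appear anywhere in this commit.

#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"

using namespace llvm;

// Hypothetical key: a kind tag plus a variable-length operand list.
struct Key {
  unsigned Kind;
  SmallVector<int, 8> Elems;
};

// Incremental style (the "before" shape): one hash_combine call per element.
static hash_code hashIncremental(const Key &K) {
  hash_code Code = hash_value(K.Kind);
  for (size_t I = 0, E = K.Elems.size(); I != E; ++I)
    Code = hash_combine(Code, hash_value(K.Elems[I]));
  return Code;
}

// Range style (the "after" shape): a single call that hashes the whole buffer,
// letting hash_combine_range process the data in larger contiguous chunks.
static hash_code hashRange(const Key &K) {
  return hash_combine(K.Kind,
                      hash_combine_range(K.Elems.begin(), K.Elems.end()));
}

Both functions yield a valid hash_code over the same data; the range form simply makes far fewer calls, which is the point of the change.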
This commit is contained in:
parent f8cde7388e
commit eea81f32cd
@@ -657,20 +657,18 @@ private:
       return ConstantClassInfo::getTombstoneKey();
     }
     static unsigned getHashValue(const ConstantClass *CP) {
-      hash_code code = hash_value(CP->getType());
+      SmallVector<Constant*, 8> CPOperands;
+      CPOperands.reserve(CP->getNumOperands());
       for (unsigned I = 0, E = CP->getNumOperands(); I < E; ++I)
-        code = hash_combine(code, hash_value(CP->getOperand(I)));
-      return code;
+        CPOperands.push_back(CP->getOperand(I));
+      return getHashValue(LookupKey(CP->getType(), CPOperands));
     }
     static bool isEqual(const ConstantClass *LHS, const ConstantClass *RHS) {
       return LHS == RHS;
     }
     static unsigned getHashValue(const LookupKey &Val) {
-      hash_code code = hash_value(Val.first);
-      for (Operands::const_iterator
-           I = Val.second.begin(), E = Val.second.end(); I != E; ++I)
-        code = hash_combine(code, hash_value(*I));
-      return code;
+      return hash_combine(Val.first, hash_combine_range(Val.second.begin(),
+                                                        Val.second.end()));
     }
     static bool isEqual(const LookupKey &LHS, const ConstantClass *RHS) {
       if (RHS == getEmptyKey() || RHS == getTombstoneKey())
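As a note on how a trait in this shape is consumed: the getEmptyKey/getTombstoneKey/getHashValue/isEqual quartet is the interface llvm::DenseMap uses to hash and compare its keys (in the diff it is a custom map-info struct with extra LookupKey overloads for heterogeneous lookup). The sketch below shows the same wiring through a plain DenseMapInfo specialization for a hypothetical PairKey type; it is illustrative only and is not code from ConstantsContext.h.

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include <vector>

// Hypothetical key type: a tag plus a variable-length list of operands.
struct PairKey {
  unsigned Tag;
  std::vector<int> Elems;
  bool operator==(const PairKey &RHS) const {
    return Tag == RHS.Tag && Elems == RHS.Elems;
  }
};

namespace llvm {
template <> struct DenseMapInfo<PairKey> {
  // Two reserved key values that never appear as real keys.
  static PairKey getEmptyKey() { return PairKey{~0u, {}}; }
  static PairKey getTombstoneKey() { return PairKey{~0u - 1, {}}; }
  static unsigned getHashValue(const PairKey &K) {
    // Hash the whole operand buffer with one hash_combine_range call,
    // mirroring the pattern the commit switches to.
    return hash_combine(K.Tag,
                        hash_combine_range(K.Elems.begin(), K.Elems.end()));
  }
  static bool isEqual(const PairKey &LHS, const PairKey &RHS) {
    return LHS == RHS;
  }
};
} // namespace llvm

// Usage: the map calls getHashValue and isEqual on every insert and lookup.
// llvm::DenseMap<PairKey, unsigned> UniqueTable;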