Reduce the offsets in DwarfDebugInfoEntry to 32 bit; they're printed with %x, and that breaks on big-endian machines.

I have to clean up the 32/64 bit confusion in libDebugInfo some day.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@143812 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Benjamin Kramer 2011-11-05 15:35:00 +00:00
parent 3f4c979e1b
commit 80cc2598f8
2 changed files with 3 additions and 3 deletions

View File

@ -26,7 +26,7 @@ void DWARFDebugInfoEntryMinimal::dump(raw_ostream &OS,
uint32_t offset = Offset;
if (debug_info_data.isValidOffset(offset)) {
-    uint64_t abbrCode = debug_info_data.getULEB128(&offset);
+    uint32_t abbrCode = debug_info_data.getULEB128(&offset);
OS << format("\n0x%8.8x: ", Offset);
if (abbrCode) {

View File

@ -23,7 +23,7 @@ class DWARFFormValue;
/// DWARFDebugInfoEntryMinimal - A DIE with only the minimum required data.
class DWARFDebugInfoEntryMinimal {
/// Offset within the .debug_info of the start of this entry.
-  uint64_t Offset;
+  uint32_t Offset;
/// How many to subtract from "this" to get the parent.
/// If zero this die has no parent.
@ -52,7 +52,7 @@ public:
uint32_t getTag() const { return AbbrevDecl ? AbbrevDecl->getTag() : 0; }
bool isNULL() const { return AbbrevDecl == 0; }
-  uint64_t getOffset() const { return Offset; }
+  uint32_t getOffset() const { return Offset; }
uint32_t getNumAttributes() const {
return !isNULL() ? AbbrevDecl->getNumAttributes() : 0;
}