Support X86_64_GOTLoad relocations in RuntimeDyldMachO by treating them the
same way as X86_64_GOT relocations. The 'Load' part of GOTLoad is just an
optimization hint for the linker anyway, and can be safely ignored.

This patch also fixes some minor issues with the relocations introduced while
processing an X86_64_GOT[Load] relocation: the addend for the GOT entry should
always be zero, and the addend for the replacement relocation at the original
offset should be the same as the addend of the relocation being replaced.
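
For reference, a minimal standalone sketch of the intended layout (illustration
only; the helper names writeGOTEntry and patchGOTLoadSite are hypothetical and
not part of RuntimeDyld): the 8-byte GOT slot ends up holding exactly the
symbol's address, so the absolute relocation that fills it takes an addend of
zero, while the original GOT/GOTLoad site becomes a 32-bit PC-relative
reference to that slot and keeps the addend of the relocation it replaces.

  // Hypothetical illustration only; not the RuntimeDyldMachO code paths.
  #include <cstdint>
  #include <cstring>

  // The GOT slot is filled by an absolute (RIT_X86_64_Unsigned, size 3)
  // relocation against the target symbol, so its addend must be zero.
  static void writeGOTEntry(uint8_t *GOTSlot, uint64_t SymbolAddr) {
    std::memcpy(GOTSlot, &SymbolAddr, sizeof(SymbolAddr));
  }

  // The original GOT/GOTLoad site is resolved as a 32-bit PC-relative
  // (size 2) fixup against the slot, carrying the original addend.
  static void patchGOTLoadSite(uint8_t *FixupAddr, uint8_t *GOTSlot,
                               int64_t OriginalAddend) {
    // x86-64 RIP-relative displacements are measured from the end of the
    // 4-byte immediate field.
    int64_t Delta = reinterpret_cast<intptr_t>(GOTSlot) -
                    reinterpret_cast<intptr_t>(FixupAddr + 4) + OriginalAddend;
    int32_t Disp32 = static_cast<int32_t>(Delta);
    std::memcpy(FixupAddr, &Disp32, sizeof(Disp32));
  }

With that layout, the load at the original site reads the symbol's address out
of the GOT slot at run time.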

I haven't come up with a good way of testing this yet, but I'm working on it.

This fixes <rdar://problem/14651564>.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188499 91177308-0d34-0410-b5e6-96231b3b80d8
commit ae7ac3974c (parent b187b69170)
Author: Lang Hames
Date:   2013-08-15 22:31:40 +00:00


@@ -339,7 +339,8 @@ void RuntimeDyldMachO::processRelocationRef(unsigned SectionID,
     Value.Addend = Addend - Addr;
   }
 
-  if (Arch == Triple::x86_64 && RelType == macho::RIT_X86_64_GOT) {
+  if (Arch == Triple::x86_64 && (RelType == macho::RIT_X86_64_GOT ||
+                                 RelType == macho::RIT_X86_64_GOTLoad)) {
     assert(IsPCRel);
     assert(Size == 2);
     StubMap::const_iterator i = Stubs.find(Value);
@@ -350,8 +351,7 @@ void RuntimeDyldMachO::processRelocationRef(unsigned SectionID,
       Stubs[Value] = Section.StubOffset;
       uint8_t *GOTEntry = Section.Address + Section.StubOffset;
       RelocationEntry RE(SectionID, Section.StubOffset,
-                         macho::RIT_X86_64_Unsigned, Value.Addend - 4, false,
-                         3);
+                         macho::RIT_X86_64_Unsigned, 0, false, 3);
       if (Value.SymbolName)
         addRelocationForSymbol(RE, Value.SymbolName);
       else
@@ -360,7 +360,7 @@ void RuntimeDyldMachO::processRelocationRef(unsigned SectionID,
       Addr = GOTEntry;
     }
     resolveRelocation(Section, Offset, (uint64_t)Addr,
-                      macho::RIT_X86_64_Unsigned, 4, true, 2);
+                      macho::RIT_X86_64_Unsigned, Value.Addend, true, 2);
   } else if (Arch == Triple::arm &&
              (RelType & 0xf) == macho::RIT_ARM_Branch24Bit) {
     // This is an ARM branch relocation, need to use a stub function.