AArch64: correct CodeGen of MOVZ/MOVK combinations.
According to the AArch64 ELF specification (4.6.8), it's the assembler's responsibility to make sure the shift amount is correct in relocated MOVZ/MOVK instructions. This wasn't being obeyed by either the MCJIT CodeGen or RuntimeDyldELF (which happened to work out well for JIT tests). This commit should make us compliant in this area.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@185360 91177308-0d34-0410-b5e6-96231b3b80d8
parent 1a84066b8c
commit 6711fc28a4
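For background, an A64 MOVZ/MOVK instruction encodes its 16-bit immediate in bits 20:5 and its shift amount, expressed as a multiple of 16 in the two-bit "hw" field, in bits 22:21; those are the fields the comments in the diff below refer to. The following is a minimal standalone sketch of that layout in C++, not LLVM code; the helper names and the choice of MOVZ x0 are purely illustrative.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative only: build a 64-bit "movz xD, #imm16, lsl #shift" word.
    // 0xd2800000 is MOVZ Xd with hw = 0 and imm16 = 0; the hw field (shift / 16)
    // lives in bits 22:21 and imm16 in bits 20:5, as the relocation code expects.
    constexpr uint32_t encodeMOVZ64(unsigned Rd, uint16_t Imm16, unsigned Shift) {
      return 0xd2800000u | ((Shift / 16) << 21) | (uint32_t(Imm16) << 5) | (Rd & 0x1f);
    }

    // Read the shift back out of an encoded MOVZ/MOVK word.
    constexpr unsigned shiftOf(uint32_t Inst) {
      return ((Inst >> 21) & 0x3) * 16;
    }

    int main() {
      uint32_t Inst = encodeMOVZ64(/*Rd=*/0, /*Imm16=*/0x1234, /*Shift=*/48);
      // Per AArch64 ELF spec 4.6.8 it is CodeGen's job to get this field right;
      // relocation processing only fills in the immediate bits.
      assert(shiftOf(Inst) == 48 && "shift field must match the relocation group");
      std::printf("movz x0, #0x1234, lsl #48 -> 0x%08x\n", Inst);
      return 0;
    }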
@@ -334,8 +334,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
     *TargetPtr &= 0xff80001fU;
     // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
     *TargetPtr |= Result >> (48 - 5);
-    // Shift is "lsl #48", in bits 22:21
-    *TargetPtr |= 3 << 21;
+    // Shift must be "lsl #48", in bits 22:21
+    assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation");
     break;
   }
   case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
@@ -347,8 +347,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
     *TargetPtr &= 0xff80001fU;
     // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
     *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
-    // Shift is "lsl #32", in bits 22:21
-    *TargetPtr |= 2 << 21;
+    // Shift must be "lsl #32", in bits 22:21
+    assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation");
     break;
   }
   case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
@@ -359,8 +359,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
     *TargetPtr &= 0xff80001fU;
     // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
     *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
-    // Shift is "lsl #16", in bits 22:21
-    *TargetPtr |= 1 << 21;
+    // Shift must be "lsl #16", in bits 22:21
+    assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation");
     break;
   }
   case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
@@ -371,7 +371,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
     *TargetPtr &= 0xff80001fU;
     // Immediate goes in bits 20:5 of MOVZ/MOVK instruction
     *TargetPtr |= ((Result & 0xffffU) << 5);
-    // Shift is "lsl #0", in bits 22:21. No action needed.
+    // Shift must be "lsl #0", in bits 22:21.
+    assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation");
     break;
   }
   }
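The four RuntimeDyldELF cases above all follow the same shape; only the group, and therefore the expected shift, differs. As a simplified, self-contained sketch of that shape, using an invented helper name rather than the actual LLVM code, one could write:

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-in for the pattern above: for group Group (0..3) of a
    // R_AARCH64_MOVW_UABS_G<N> relocation, write bits [16*Group+15 : 16*Group]
    // of the resolved value into imm16 (bits 20:5) and only *check* the hw
    // shift field (bits 22:21), which CodeGen is now responsible for emitting.
    inline void resolveMovwGroup(uint32_t *TargetPtr, uint64_t Result, unsigned Group) {
      uint32_t Imm16 = uint32_t(Result >> (16 * Group)) & 0xffffu;
      *TargetPtr &= ~(0xffffu << 5);   // clear imm16 (bits 20:5) only
      *TargetPtr |= Imm16 << 5;        // insert this group's 16 bits
      // Shift must already be "lsl #(16 * Group)", in bits 22:21.
      assert(((*TargetPtr >> 21) & 0x3) == Group && "invalid shift for relocation");
    }

    int main() {
      // Example: a 64-bit MOVK with hw = 2 ("lsl #32") and a zero immediate.
      uint32_t Inst = 0xf2c00000u;
      resolveMovwGroup(&Inst, 0x0000123400000000ull, /*Group=*/2);
      assert(((Inst >> 5) & 0xffff) == 0x1234);
      return 0;
    }

The real code keeps one switch case per group and its existing masks; the sketch only captures the imm16-write-plus-assert split that this commit introduces.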
@@ -70,10 +70,11 @@ public:

   /// Used for pre-lowered address-reference nodes, so we already know
   /// the fields match. This operand's job is simply to add an
-  /// appropriate shift operand (i.e. 0) to the MOVZ/MOVK instruction.
+  /// appropriate shift operand to the MOVZ/MOVK instruction.
+  template<unsigned LogShift>
   bool SelectMOVWAddressRef(SDValue N, SDValue &Imm, SDValue &Shift) {
     Imm = N;
-    Shift = CurDAG->getTargetConstant(0, MVT::i32);
+    Shift = CurDAG->getTargetConstant(LogShift, MVT::i32);
     return true;
   }

@@ -3974,14 +3974,17 @@ def : movalias<MOVZxii, GPR64, movz64_movimm>;
 def : movalias<MOVNwii, GPR32, movn32_movimm>;
 def : movalias<MOVNxii, GPR64, movn64_movimm>;

-def movw_addressref : ComplexPattern<i64, 2, "SelectMOVWAddressRef">;
+def movw_addressref_g0 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<0>">;
+def movw_addressref_g1 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<1>">;
+def movw_addressref_g2 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<2>">;
+def movw_addressref_g3 : ComplexPattern<i64, 2, "SelectMOVWAddressRef<3>">;

-def : Pat<(A64WrapperLarge movw_addressref:$G3, movw_addressref:$G2,
-                           movw_addressref:$G1, movw_addressref:$G0),
-          (MOVKxii (MOVKxii (MOVKxii (MOVZxii movw_addressref:$G3),
-                                     movw_addressref:$G2),
-                            movw_addressref:$G1),
-                   movw_addressref:$G0)>;
+def : Pat<(A64WrapperLarge movw_addressref_g3:$G3, movw_addressref_g2:$G2,
+                           movw_addressref_g1:$G1, movw_addressref_g0:$G0),
+          (MOVKxii (MOVKxii (MOVKxii (MOVZxii movw_addressref_g3:$G3),
+                                     movw_addressref_g2:$G2),
+                            movw_addressref_g1:$G1),
+                   movw_addressref_g0:$G0)>;

 //===----------------------------------------------------------------------===//
 // PC-relative addressing instructions
test/CodeGen/AArch64/movw-shift-encoding.ll (new file, 14 lines)
@@ -0,0 +1,14 @@
+; RUN: llc -mtriple=aarch64-linux-gnu < %s -show-mc-encoding -code-model=large | FileCheck %s
+
+@var = global i32 0
+
+; CodeGen should ensure that the correct shift bits are set, because the linker
+; isn't going to!
+
+define i32* @get_var() {
+  ret i32* @var
+; CHECK: movz x0, #:abs_g3:var // encoding: [A,A,0xe0'A',0xd2'A']
+; CHECK: movk x0, #:abs_g2_nc:var // encoding: [A,A,0xc0'A',0xf2'A']
+; CHECK: movk x0, #:abs_g1_nc:var // encoding: [A,A,0xa0'A',0xf2'A']
+; CHECK: movk x0, #:abs_g0_nc:var // encoding: [A,A,0x80'A',0xf2'A']
+}