AArch64/ARM64: implement "mov $Rd, $Imm" aliases in TableGen.

This is a slightly different approach to AArch64 (the base instruction
definitions aren't quite right for that to work), but achieves the
same thing and reduces C++ hackery in AsmParser.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@208605 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Tim Northover 2014-05-12 18:03:36 +00:00
parent 5049ca67c2
commit c56a5421b8
3 changed files with 94 additions and 58 deletions

View File

@ -1322,6 +1322,7 @@ def movimm64_shift : Operand<i32> {
let PrintMethod = "printShifter";
let ParserMatchClass = MovImm64ShifterOperand;
}
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in
class BaseMoveImmediate<bits<2> opc, RegisterClass regtype, Operand shifter,
string asm>

View File

@ -365,6 +365,7 @@ defm MOVN : MoveImmediate<0b00, "movn">;
let PostEncoderMethod = "fixMOVZ" in
defm MOVZ : MoveImmediate<0b10, "movz">;
// First group of aliases covers an implicit "lsl #0".
def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
@ -372,6 +373,7 @@ def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, imm0_65535:$imm, 0)>;
// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
@ -396,6 +398,40 @@ def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16)>;
def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0)>;
// Final group of aliases covers true "mov $Rd, $imm" cases.
// Emits the machinery for one "mov $Rd, $imm" alias of a MOVZ/MOVN
// instruction at a fixed register width and shift amount:
//   - an AsmOperandClass whose predicate/render methods defer to the
//     C++ templates is<basename>MovAlias<width, shift>() and
//     add<basename>MovAliasOperands<shift>() in the AsmParser, and
//   - an Operand wrapping it, used by the InstAlias below.
multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
int width, int shift> {
def _asmoperand : AsmOperandClass {
let Name = basename # width # "_lsl" # shift # "MovAlias";
let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
# shift # ">";
let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
}
// Immediate operand type that parses only values this (width, shift)
// combination can encode.
def _movimm : Operand<i32> {
let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
}
// The alias itself: the shift is implicit in which instantiation matched.
def : InstAlias<"mov $Rd, $imm",
(INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
}
// One instantiation per (instruction, width, 16-bit field position): the
// matcher tries each in order, so MOVZ forms are listed before MOVN forms.
// 32-bit MOVZ: field may sit at bit 0 or 16.
defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
// 64-bit MOVZ: field may sit at bit 0, 16, 32 or 48.
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
// MOVN variants cover constants whose complement fits a shifted 16-bit field.
defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
isAsCheapAsAMove = 1 in {
// FIXME: The following pseudo instructions are only needed because remat

View File

@ -724,6 +724,44 @@ public:
return isMovWSymbol(Variants);
}
template<int RegWidth, int Shift>
bool isMOVZMovAlias() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t Value = CE->getValue();
if (RegWidth == 32)
Value &= 0xffffffffULL;
// "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
if (Value == 0 && Shift != 0)
return false;
return (Value & ~(0xffffULL << Shift)) == 0;
}
template<int RegWidth, int Shift>
bool isMOVNMovAlias() const {
if (!isImm()) return false;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
if (!CE) return false;
uint64_t Value = CE->getValue();
// MOVZ takes precedence over MOVN.
for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
if ((Value & ~(0xffffULL << MOVZShift)) == 0)
return false;
Value = ~Value;
if (RegWidth == 32)
Value &= 0xffffffffULL;
return (Value & ~(0xffffULL << Shift)) == 0;
}
bool isFPImm() const { return Kind == k_FPImm; }
bool isBarrier() const { return Kind == k_Barrier; }
bool isSysReg() const { return Kind == k_SysReg; }
@ -1473,6 +1511,24 @@ public:
Inst.addOperand(MCOperand::CreateImm(Imm));
}
template<int Shift>
void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
  // Render the matched constant as the MOVZ immediate field: extract the
  // 16 bits at Shift (isMOVZMovAlias guarantees the rest are zero).
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *Const = cast<MCConstantExpr>(getImm());
  const uint64_t Field = (Const->getValue() >> Shift) & 0xffff;
  Inst.addOperand(MCOperand::CreateImm(Field));
}
template<int Shift>
void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
  // Render the matched constant as the MOVN immediate field: the encoded
  // value is the complement of the requested constant, so extract the 16
  // bits at Shift from ~Value.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *Const = cast<MCConstantExpr>(getImm());
  const uint64_t Field = (~Const->getValue() >> Shift) & 0xffff;
  Inst.addOperand(MCOperand::CreateImm(Field));
}
void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
assert(N == 3 && "Invalid number of operands!");
@ -3723,63 +3779,7 @@ bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
StringRef Tok = Op->getToken();
unsigned NumOperands = Operands.size();
if (Tok == "mov" && NumOperands == 3) { if (NumOperands == 4 && Tok == "lsl") {
// The MOV mnemomic is aliased to movn/movz, depending on the value of
// the immediate being instantiated.
// FIXME: Catching this here is a total hack, and we should use tblgen
// support to implement this instead as soon as it is available.
ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
if (Op2->isImm()) {
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
uint64_t Val = CE->getValue();
uint64_t NVal = ~Val;
// If this is a 32-bit register and the value has none of the upper
// set, clear the complemented upper 32-bits so the logic below works
// for 32-bit registers too.
ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
if (Op1->isReg() &&
ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
Op1->getReg()) &&
(Val & 0xFFFFFFFFULL) == Val)
NVal &= 0x00000000FFFFFFFFULL;
// MOVK Rd, imm << 0
if ((Val & 0xFFFF) == Val)
rewriteMOVI(Operands, "movz", Val, 0, getContext());
// MOVK Rd, imm << 16
else if ((Val & 0xFFFF0000ULL) == Val)
rewriteMOVI(Operands, "movz", Val, 16, getContext());
// MOVK Rd, imm << 32
else if ((Val & 0xFFFF00000000ULL) == Val)
rewriteMOVI(Operands, "movz", Val, 32, getContext());
// MOVK Rd, imm << 48
else if ((Val & 0xFFFF000000000000ULL) == Val)
rewriteMOVI(Operands, "movz", Val, 48, getContext());
// MOVN Rd, (~imm << 0)
else if ((NVal & 0xFFFFULL) == NVal)
rewriteMOVI(Operands, "movn", NVal, 0, getContext());
// MOVN Rd, ~(imm << 16)
else if ((NVal & 0xFFFF0000ULL) == NVal)
rewriteMOVI(Operands, "movn", NVal, 16, getContext());
// MOVN Rd, ~(imm << 32)
else if ((NVal & 0xFFFF00000000ULL) == NVal)
rewriteMOVI(Operands, "movn", NVal, 32, getContext());
// MOVN Rd, ~(imm << 48)
else if ((NVal & 0xFFFF000000000000ULL) == NVal)
rewriteMOVI(Operands, "movn", NVal, 48, getContext());
}
}
} else if (NumOperands == 4) {
if (NumOperands == 4 && Tok == "lsl") {
ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
if (Op2->isReg() && Op3->isImm()) {
@ -3812,7 +3812,6 @@ bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
delete Op;
}
}
// FIXME: Horrible hack to handle the optional LSL shift for vector
// instructions.
} else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {