AArch64/ARM64: use InstAliases for NEON logical (imm) instructions.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@208606 91177308-0d34-0410-b5e6-96231b3b80d8
parent c56a5421b8
commit 2161fd6114
ARM64InstrInfo.td

@@ -3638,6 +3638,25 @@ defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", ARM64bici>;
// AdvSIMD ORR
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", ARM64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0b1111, V128, fpimm8,
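For illustration, this is roughly what the BIC/ORR aliases above accept at the assembly level. The registers and immediates here are arbitrary (not taken from the commit's tests); the point is that the shift-less spelling is matched by an InstAlias that fills in a shift amount of 0, while the explicit-shift spelling still matches the underlying instruction definition directly.

    // Illustrative operands only.
    bic v0.4h, #0x3f              // accepted via the alias; same encoding as below
    bic v0.4h, #0x3f, lsl #0      // explicit form of the same instruction
    orr v2.2s, #0x80, lsl #16     // explicit shifts match the real definition, no alias needed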
@@ -3708,6 +3727,17 @@ def : Pat<(v4f32 (ARM64dup (f32 fpimm0))), (MOVIv2d_ns (i32 0))>;

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (ARM64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (ARM64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
@@ -3737,6 +3767,17 @@ def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0b1110, V128, imm0_255,

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (ARM64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (ARM64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
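Two spellings are covered for each of MOVI and MVNI, just as for BIC/ORR above: the vector arrangement can sit on the register or, in the dot-suffix syntax, on the mnemonic. The trailing 0 in the second group of InstAlias records clears the emit bit, so those forms are accepted by the parser but never chosen by the printer. A small illustration, with arbitrary operands:

    movi v1.4h, #0x2a             // matches the "movi $Vd.4h, $imm" alias
    movi.4h v1, #0x2a             // matches the "movi.4h $Vd, $imm" alias (parse-only)
    mvni v3.4s, #0x10             // same pattern for MVNI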
ARM64AsmParser.cpp

@@ -3665,24 +3665,6 @@ bool ARM64AsmParser::validateInstruction(MCInst &Inst,
  }
}

static void rewriteMOVI(ARM64AsmParser::OperandVector &Operands,
                        StringRef mnemonic, uint64_t imm, unsigned shift,
                        MCContext &Context) {
  ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
  ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
  Operands[0] =
      ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);

  const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
  Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
                                        Op2->getEndLoc(), Context);

  Operands.push_back(ARM64Operand::CreateShiftExtend(
      ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context));
  delete Op2;
  delete Op;
}

bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
  switch (ErrCode) {
  case Match_MissingFeature:
@@ -3780,62 +3762,34 @@ bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
  unsigned NumOperands = Operands.size();

  if (NumOperands == 4 && Tok == "lsl") {
    ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
    ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
    if (Op2->isReg() && Op3->isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
      if (Op3CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
                Op2->getReg())) {
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
        } else {
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        }

        const MCExpr *NewOp3 =
            MCConstantExpr::Create(NewOp3Val, getContext());
        const MCExpr *NewOp4 =
            MCConstantExpr::Create(NewOp4Val, getContext());

        Operands[0] = ARM64Operand::CreateToken(
            "ubfm", false, Op->getStartLoc(), getContext());
        Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
                                              Op3->getEndLoc(), getContext());
        Operands.push_back(ARM64Operand::CreateImm(
            NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
        delete Op3;
        delete Op;
    ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
    ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
    if (Op2->isReg() && Op3->isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
      if (Op3CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
                Op2->getReg())) {
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
        } else {
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        }
      }
    // FIXME: Horrible hack to handle the optional LSL shift for vector
    //        instructions.
  } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
    ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
    ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
    ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
    if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
        (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
      Operands.push_back(ARM64Operand::CreateShiftExtend(
          ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
  } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
    ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
    ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
    ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
    if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
        (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
      StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
      // Canonicalize on lower-case for ease of comparison.
      std::string CanonicalSuffix = Suffix.lower();
      if (Tok != "movi" ||
          (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
           CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
        Operands.push_back(ARM64Operand::CreateShiftExtend(
            ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));

        const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
        const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());

        Operands[0] = ARM64Operand::CreateToken(
            "ubfm", false, Op->getStartLoc(), getContext());
        Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
                                              Op3->getEndLoc(), getContext());
        Operands.push_back(ARM64Operand::CreateImm(
            NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
        delete Op3;
        delete Op;
      }
    }
  } else if (NumOperands == 5) {
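The parser hunk deals with the same instruction family: when a 4-operand bic/orr/movi/mvni carries a vector register and a bare immediate, a ShiftExtend operand of LSL #0 is appended so the operand list lines up with the shifted-immediate instruction definitions, while movi with a .1d/.2d/.8b/.16b suffix is skipped because those widths map to the SIMDModifiedImmVectorNoShift definitions (e.g. MOVIv16b_ns above) and take no shift operand. A sketch of the distinction, with made-up operands:

    movi v0.4s, #0x7f                // shifted-immediate form, treated as movi v0.4s, #0x7f, lsl #0
    mvni v1.8h, #0x3c, lsl #8        // explicit shift, nothing to fix up
    movi v2.16b, #0xff               // no-shift encoding: no LSL operand applies
    movi v3.2d, #0xff00ff00ff00ff00  // 64-bit byte-mask form, also shift-less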