ARM NEON assembly parsing and encoding for VDUP(scalar).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@141446 91177308-0d34-0410-b5e6-96231b3b80d8
parent c570191060
commit 460a90540b
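For context, the lane-indexed VDUP (scalar) syntax that this commit teaches the assembler to parse and encode looks like the following (operands borrowed from the test changes further down; the register choices are purely illustrative):

    vdup.8  d16, d11[0]   @ replicate byte lane 0 of d11 into every byte of d16
    vdup.16 q9, d9[1]     @ replicate halfword lane 1 of d9 into every halfword of q9
    vdup.32 d18, d13[1]   @ replicate word lane 1 of d13 into every word of d18

The bracketed lane index is range-checked per element size: 0-7 for .8, 0-3 for .16, and 0-1 for .32, matching the VectorIndex8/16/32 operand definitions added below.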
@@ -1951,7 +1951,6 @@ class NVDupLane<bits<4> op19_16, bit op6, dag oops, dag iops,
   bits<5> Vd;
   bits<5> Vm;
-  bits<4> lane;
 
   let Inst{22} = Vd{4};
   let Inst{15-12} = Vd{3-0};
 
@@ -11,6 +11,35 @@
 //
 //===----------------------------------------------------------------------===//
 
+
+//===----------------------------------------------------------------------===//
+// NEON-specific Operands.
+//===----------------------------------------------------------------------===//
+def VectorIndex8Operand : AsmOperandClass { let Name = "VectorIndex8"; }
+def VectorIndex16Operand : AsmOperandClass { let Name = "VectorIndex16"; }
+def VectorIndex32Operand : AsmOperandClass { let Name = "VectorIndex32"; }
+def VectorIndex8 : Operand<i32>, ImmLeaf<i32, [{
+  return ((uint64_t)Imm) < 8;
+}]> {
+  let ParserMatchClass = VectorIndex8Operand;
+  let PrintMethod = "printVectorIndex";
+  let MIOperandInfo = (ops i32imm);
+}
+def VectorIndex16 : Operand<i32>, ImmLeaf<i32, [{
+  return ((uint64_t)Imm) < 4;
+}]> {
+  let ParserMatchClass = VectorIndex16Operand;
+  let PrintMethod = "printVectorIndex";
+  let MIOperandInfo = (ops i32imm);
+}
+def VectorIndex32 : Operand<i32>, ImmLeaf<i32, [{
+  return ((uint64_t)Imm) < 2;
+}]> {
+  let ParserMatchClass = VectorIndex32Operand;
+  let PrintMethod = "printVectorIndex";
+  let MIOperandInfo = (ops i32imm);
+}
+
 //===----------------------------------------------------------------------===//
 // NEON-specific DAG Nodes.
 //===----------------------------------------------------------------------===//
@@ -4518,36 +4547,42 @@ def : Pat<(v4f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32q GPR:$R)>;
 // VDUP : Vector Duplicate Lane (from scalar to all elements)
 
 class VDUPLND<bits<4> op19_16, string OpcodeStr, string Dt,
-              ValueType Ty>
-  : NVDupLane<op19_16, 0, (outs DPR:$Vd), (ins DPR:$Vm, nohash_imm:$lane),
-              IIC_VMOVD, OpcodeStr, Dt, "$Vd, $Vm[$lane]",
+              ValueType Ty, Operand IdxTy>
+  : NVDupLane<op19_16, 0, (outs DPR:$Vd), (ins DPR:$Vm, IdxTy:$lane),
+              IIC_VMOVD, OpcodeStr, Dt, "$Vd, $Vm$lane",
               [(set DPR:$Vd, (Ty (NEONvduplane (Ty DPR:$Vm), imm:$lane)))]>;
 
 class VDUPLNQ<bits<4> op19_16, string OpcodeStr, string Dt,
-              ValueType ResTy, ValueType OpTy>
-  : NVDupLane<op19_16, 1, (outs QPR:$Vd), (ins DPR:$Vm, nohash_imm:$lane),
-              IIC_VMOVQ, OpcodeStr, Dt, "$Vd, $Vm[$lane]",
+              ValueType ResTy, ValueType OpTy, Operand IdxTy>
+  : NVDupLane<op19_16, 1, (outs QPR:$Vd), (ins DPR:$Vm, IdxTy:$lane),
+              IIC_VMOVQ, OpcodeStr, Dt, "$Vd, $Vm$lane",
               [(set QPR:$Vd, (ResTy (NEONvduplane (OpTy DPR:$Vm),
-                                      imm:$lane)))]>;
+                                      VectorIndex32:$lane)))]>;
 
 // Inst{19-16} is partially specified depending on the element size.
 
-def VDUPLN8d : VDUPLND<{?,?,?,1}, "vdup", "8", v8i8> {
+def VDUPLN8d : VDUPLND<{?,?,?,1}, "vdup", "8", v8i8, VectorIndex8> {
+  bits<3> lane;
   let Inst{19-17} = lane{2-0};
 }
-def VDUPLN16d : VDUPLND<{?,?,1,0}, "vdup", "16", v4i16> {
+def VDUPLN16d : VDUPLND<{?,?,1,0}, "vdup", "16", v4i16, VectorIndex16> {
+  bits<2> lane;
   let Inst{19-18} = lane{1-0};
 }
-def VDUPLN32d : VDUPLND<{?,1,0,0}, "vdup", "32", v2i32> {
+def VDUPLN32d : VDUPLND<{?,1,0,0}, "vdup", "32", v2i32, VectorIndex32> {
+  bits<1> lane;
   let Inst{19} = lane{0};
 }
-def VDUPLN8q : VDUPLNQ<{?,?,?,1}, "vdup", "8", v16i8, v8i8> {
+def VDUPLN8q : VDUPLNQ<{?,?,?,1}, "vdup", "8", v16i8, v8i8, VectorIndex8> {
+  bits<3> lane;
   let Inst{19-17} = lane{2-0};
 }
-def VDUPLN16q : VDUPLNQ<{?,?,1,0}, "vdup", "16", v8i16, v4i16> {
+def VDUPLN16q : VDUPLNQ<{?,?,1,0}, "vdup", "16", v8i16, v4i16, VectorIndex16> {
+  bits<2> lane;
   let Inst{19-18} = lane{1-0};
 }
-def VDUPLN32q : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4i32, v2i32> {
+def VDUPLN32q : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4i32, v2i32, VectorIndex32> {
+  bits<1> lane;
   let Inst{19} = lane{0};
 }
 
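As a rough sanity check of the lane encoding (worked by hand from one of the test vectors further down, so treat it as illustrative): for vdup.16 d17, d12[1] the test expects the Thumb encoding [0xf6,0xff,0x0c,0x1c], i.e. the halfwords 0xfff6 0x1c0c. Bits 19-16 are the low nibble of the first halfword, 0b0110: bits 17-16 = 0b10 are the fixed pattern from the {?,?,1,0} parameter of VDUPLN16d, and bits 19-18 = 0b01 carry the lane index 1 via `let Inst{19-18} = lane{1-0}`.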
@@ -254,6 +254,7 @@ class ARMOperand : public MCParsedAsmOperand {
     k_PostIndexRegister,
     k_MSRMask,
     k_ProcIFlags,
+    k_VectorIndex,
     k_Register,
     k_RegisterList,
     k_DPRRegisterList,
@@ -303,6 +304,10 @@ class ARMOperand : public MCParsedAsmOperand {
       unsigned RegNum;
     } Reg;
 
+    struct {
+      unsigned Val;
+    } VectorIndex;
+
     struct {
       const MCExpr *Val;
     } Imm;
@@ -419,6 +424,9 @@ public:
     case k_BitfieldDescriptor:
       Bitfield = o.Bitfield;
      break;
+    case k_VectorIndex:
+      VectorIndex = o.VectorIndex;
+      break;
     }
   }
 
@@ -463,6 +471,11 @@ public:
     return FPImm.Val;
   }
 
+  unsigned getVectorIndex() const {
+    assert(Kind == k_VectorIndex && "Invalid access!");
+    return VectorIndex.Val;
+  }
+
   ARM_MB::MemBOpt getMemBarrierOpt() const {
     assert(Kind == k_MemBarrierOpt && "Invalid access!");
     return MBOpt.Val;
@@ -859,6 +872,21 @@ public:
   bool isMSRMask() const { return Kind == k_MSRMask; }
   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
 
+  bool isVectorIndex8() const {
+    if (Kind != k_VectorIndex) return false;
+    return VectorIndex.Val < 8;
+  }
+  bool isVectorIndex16() const {
+    if (Kind != k_VectorIndex) return false;
+    return VectorIndex.Val < 4;
+  }
+  bool isVectorIndex32() const {
+    if (Kind != k_VectorIndex) return false;
+    return VectorIndex.Val < 2;
+  }
+
+
 
   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
     // Add as immediates when possible.  Null MCExpr = 0.
     if (Expr == 0)
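One practical effect of these predicates (an illustrative example, not taken from the commit's tests): an out-of-range lane such as vdup.16 d17, d12[4] still parses into a k_VectorIndex operand holding 4, but it fails isVectorIndex16, which requires a value below 4, so the generated matcher rejects the instruction instead of silently mis-encoding it.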
@@ -1343,6 +1371,21 @@ public:
     Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
   }
 
+  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
+  }
+
+  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
+  }
+
+  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
+  }
+
   virtual void print(raw_ostream &OS) const;
 
   static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
@@ -1479,6 +1522,15 @@ public:
     return Op;
   }
 
+  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
+                                       MCContext &Ctx) {
+    ARMOperand *Op = new ARMOperand(k_VectorIndex);
+    Op->VectorIndex.Val = Idx;
+    Op->StartLoc = S;
+    Op->EndLoc = E;
+    return Op;
+  }
+
   static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
     ARMOperand *Op = new ARMOperand(k_Immediate);
     Op->Imm.Val = Val;
@@ -1659,6 +1711,9 @@ void ARMOperand::print(raw_ostream &OS) const {
   case k_Token:
     OS << "'" << getToken() << "'";
     break;
+  case k_VectorIndex:
+    OS << "<vectorindex " << getVectorIndex() << ">";
+    break;
   }
 }
 
@@ -1700,6 +1755,39 @@ int ARMAsmParser::tryParseRegister() {
   if (!RegNum) return -1;
 
   Parser.Lex(); // Eat identifier token.
+
+#if 0
+  // Also check for an index operand. This is only legal for vector registers,
+  // but that'll get caught OK in operand matching, so we don't need to
+  // explicitly filter everything else out here.
+  if (Parser.getTok().is(AsmToken::LBrac)) {
+    SMLoc SIdx = Parser.getTok().getLoc();
+    Parser.Lex(); // Eat left bracket token.
+
+    const MCExpr *ImmVal;
+    SMLoc ExprLoc = Parser.getTok().getLoc();
+    if (getParser().ParseExpression(ImmVal))
+      return MatchOperand_ParseFail;
+    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+    if (!MCE) {
+      TokError("immediate value expected for vector index");
+      return MatchOperand_ParseFail;
+    }
+
+    SMLoc E = Parser.getTok().getLoc();
+    if (Parser.getTok().isNot(AsmToken::RBrac)) {
+      Error(E, "']' expected");
+      return MatchOperand_ParseFail;
+    }
+
+    Parser.Lex(); // Eat right bracket token.
+
+    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
+                                                     SIdx, E,
+                                                     getContext()));
+  }
+#endif
+
   return RegNum;
 }
 
@@ -1815,6 +1903,37 @@ tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                                ExclaimTok.getLoc()));
     Parser.Lex(); // Eat exclaim token
     return false;
   }
 
+  // Also check for an index operand. This is only legal for vector registers,
+  // but that'll get caught OK in operand matching, so we don't need to
+  // explicitly filter everything else out here.
+  if (Parser.getTok().is(AsmToken::LBrac)) {
+    SMLoc SIdx = Parser.getTok().getLoc();
+    Parser.Lex(); // Eat left bracket token.
+
+    const MCExpr *ImmVal;
+    SMLoc ExprLoc = Parser.getTok().getLoc();
+    if (getParser().ParseExpression(ImmVal))
+      return MatchOperand_ParseFail;
+    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
+    if (!MCE) {
+      TokError("immediate value expected for vector index");
+      return MatchOperand_ParseFail;
+    }
+
+    SMLoc E = Parser.getTok().getLoc();
+    if (Parser.getTok().isNot(AsmToken::RBrac)) {
+      Error(E, "']' expected");
+      return MatchOperand_ParseFail;
+    }
+
+    Parser.Lex(); // Eat right bracket token.
+
+    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
+                                                     SIdx, E,
+                                                     getContext()));
+  }
+
   return false;
@@ -981,3 +981,8 @@ void ARMInstPrinter::printRotImmOperand(const MCInst *MI, unsigned OpNum,
   case 3: O << "24"; break;
   }
 }
+
+void ARMInstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum,
+                                      raw_ostream &O) {
+  O << "[" << MI->getOperand(OpNum).getImm() << "]";
+}
@@ -127,6 +127,7 @@ public:
 
   void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O);
   void printT2LdrLabelOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O);
+  void printVectorIndex(const MCInst *MI, unsigned OpNum, raw_ostream &O);
 };
 
 } // end namespace llvm
@@ -16,16 +16,28 @@
 @ CHECK: vdup.16 q8, r5 @ encoding: [0xa0,0xee,0xb0,0x5b]
 @ CHECK: vdup.32 q7, r6 @ encoding: [0xae,0xee,0x10,0x6b]
 
-@ vdup.8 d16, d16[1]
-@ vdup.16 d16, d16[1]
-@ vdup.32 d16, d16[1]
-@ vdup.8 q8, d16[1]
-@ vdup.16 q8, d16[1]
-@ vdup.32 q8, d16[1]
+ vdup.8 d16, d11[0]
+ vdup.16 d17, d12[0]
+ vdup.32 d18, d13[0]
+ vdup.8 q3, d10[0]
+ vdup.16 q9, d9[0]
+ vdup.32 q8, d8[0]
+ vdup.8 d16, d11[1]
+ vdup.16 d17, d12[1]
+ vdup.32 d18, d13[1]
+ vdup.8 q3, d10[1]
+ vdup.16 q9, d9[1]
+ vdup.32 q8, d8[1]
 
-@ FIXME: vdup.8 d16, d16[1] @ encoding: [0x20,0x0c,0xf3,0xff]
-@ FIXME: vdup.16 d16, d16[1] @ encoding: [0x20,0x0c,0xf6,0xff]
-@ FIXME: vdup.32 d16, d16[1] @ encoding: [0x20,0x0c,0xfc,0xff]
-@ FIXME: vdup.8 q8, d16[1] @ encoding: [0x60,0x0c,0xf3,0xff]
-@ FIXME: vdup.16 q8, d16[1] @ encoding: [0x60,0x0c,0xf6,0xff]
-@ FIXME: vdup.32 q8, d16[1] @ encoding: [0x60,0x0c,0xfc,0xff]
+@ CHECK: vdup.8 d16, d11[0] @ encoding: [0xf1,0xff,0x0b,0x0c]
+@ CHECK: vdup.16 d17, d12[0] @ encoding: [0xf2,0xff,0x0c,0x1c]
+@ CHECK: vdup.32 d18, d13[0] @ encoding: [0xf4,0xff,0x0d,0x2c]
+@ CHECK: vdup.8 q3, d10[0] @ encoding: [0xb1,0xff,0x4a,0x6c]
+@ CHECK: vdup.16 q9, d9[0] @ encoding: [0xf2,0xff,0x49,0x2c]
+@ CHECK: vdup.32 q8, d8[0] @ encoding: [0xf4,0xff,0x48,0x0c]
+@ CHECK: vdup.8 d16, d11[1] @ encoding: [0xf3,0xff,0x0b,0x0c]
+@ CHECK: vdup.16 d17, d12[1] @ encoding: [0xf6,0xff,0x0c,0x1c]
+@ CHECK: vdup.32 d18, d13[1] @ encoding: [0xfc,0xff,0x0d,0x2c]
+@ CHECK: vdup.8 q3, d10[1] @ encoding: [0xb3,0xff,0x4a,0x6c]
+@ CHECK: vdup.16 q9, d9[1] @ encoding: [0xf6,0xff,0x49,0x2c]
+@ CHECK: vdup.32 q8, d8[1] @ encoding: [0xfc,0xff,0x48,0x0c]
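To check encodings like these by hand, an invocation along these lines should work (illustrative only; the test's actual RUN line is not shown in this hunk, and the file name here is made up):

    llvm-mc -triple thumbv7 -mcpu=cortex-a8 -show-encoding neon-vdup-scalar.s

The byte groups in the CHECK lines are the Thumb2 halfwords in little-endian memory order, which is why the first halfword of vdup.8 d16, d11[0] reads back as 0xfff1.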
@@ -624,6 +624,9 @@ static int ARMFlagFromOpName(LiteralConstantEmitter *type,
   IMM("postidx_imm8s4");
   IMM("imm_sr");
   IMM("imm1_31");
+  IMM("VectorIndex8");
+  IMM("VectorIndex16");
+  IMM("VectorIndex32");
 
   MISC("brtarget", "kOperandTypeARMBranchTarget"); // ?
   MISC("uncondbrtarget", "kOperandTypeARMBranchTarget"); // ?