Mirror of https://github.com/c64scene-ar/llvm-6502.git
Expand Op0Mask by one bit in preparation for the PadLock prefixes.
Define most shift masks incrementally to reduce the redundant hard-coding.
Introduce a new shift for the VEX flags to replace the magic constant 32 in
various places.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@128822 91177308-0d34-0410-b5e6-96231b3b80d8
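For context, the checks below of the form (TSFlags >> 32) & X86II::VEX_4V work
because the VEX-related flags live in the upper 32 bits of the 64-bit TSFlags
word; this commit names that shift (X86II::VEXShift) instead of hard-coding 32,
and defines the low-field shifts incrementally so that widening one mask (such
as Op0Mask gaining a bit for the PadLock prefixes) moves everything after it.
The following is a minimal sketch of that pattern only: the field widths and
low-bit positions are invented for illustration and are not the actual
X86InstrInfo.h layout, although the flag names (VEX, VEX_4V, VEXShift, Op0Mask)
come from the patch itself.

// Minimal sketch -- hypothetical field widths, not LLVM's real layout.
#include <cstdint>
#include <iostream>

namespace X86II {
enum {
  // Low TSFlags fields; widths here are made up for the example. Each shift
  // is derived from the previous field, so widening one mask shifts the rest.
  FormShift   = 0,
  FormMask    = 0x3F << FormShift,

  OpSizeShift = FormShift + 6,
  OpSize      = 1 << OpSizeShift,

  Op0Shift    = OpSizeShift + 1,
  Op0Mask     = 0x1F << Op0Shift,   // one bit wider to make room for new prefixes

  // VEX-related flags sit in the upper 32 bits of the 64-bit TSFlags, so the
  // shift that reaches them is 32 -- named once instead of hard-coded.
  VEXShift    = 32,
  VEX         = 1 << 0,
  VEX_W       = 1 << 1,
  VEX_4V      = 1 << 2
};
} // namespace X86II

int main() {
  // Fake TSFlags value with VEX and VEX_4V set in the upper half.
  uint64_t TSFlags =
      (uint64_t)(X86II::VEX | X86II::VEX_4V) << X86II::VEXShift;

  // Same access pattern as the patch: shift down by the named constant,
  // then test the individual flag bit.
  bool HasVEXPrefix = (TSFlags >> X86II::VEXShift) & X86II::VEX;
  bool HasVEX_4V    = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;

  std::cout << HasVEXPrefix << " " << HasVEX_4V << "\n";  // prints: 1 1
  return 0;
}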
@@ -382,7 +382,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                            const TargetInstrDesc &Desc,
                                            raw_ostream &OS) const {
   bool HasVEX_4V = false;
-  if ((TSFlags >> 32) & X86II::VEX_4V)
+  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_4V)
     HasVEX_4V = true;
 
   // VEX_R: opcode externsion equivalent to REX.R in
@@ -446,10 +446,10 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
   if (TSFlags & X86II::OpSize)
     VEX_PP = 0x01;
 
-  if ((TSFlags >> 32) & X86II::VEX_W)
+  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
     VEX_W = 1;
 
-  if ((TSFlags >> 32) & X86II::VEX_L)
+  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
     VEX_L = 1;
 
   switch (TSFlags & X86II::Op0Mask) {
@@ -518,7 +518,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
 
   // If the last register should be encoded in the immediate field
   // do not use any bit from VEX prefix to this register, ignore it
-  if ((TSFlags >> 32) & X86II::VEX_I8IMM)
+  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM)
     NumOps--;
 
   for (; CurOp != NumOps; ++CurOp) {
@@ -819,9 +819,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
   // It uses the VEX.VVVV field?
   bool HasVEX_4V = false;
 
-  if ((TSFlags >> 32) & X86II::VEX)
+  if ((TSFlags >> X86II::VEXShift) & X86II::VEX)
     HasVEXPrefix = true;
-  if ((TSFlags >> 32) & X86II::VEX_4V)
+  if ((TSFlags >> X86II::VEXShift) & X86II::VEX_4V)
     HasVEX_4V = true;
 
 
@@ -837,7 +837,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
 
   unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
 
-  if ((TSFlags >> 32) & X86II::Has3DNow0F0FOpcode)
+  if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
     BaseOpcode = 0x0F; // Weird 3DNow! encoding.
 
   unsigned SrcRegNum = 0;
@@ -994,7 +994,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
   if (CurOp != NumOps) {
     // The last source register of a 4 operand instruction in AVX is encoded
     // in bits[7:4] of a immediate byte, and bits[3:0] are ignored.
-    if ((TSFlags >> 32) & X86II::VEX_I8IMM) {
+    if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
       const MCOperand &MO = MI.getOperand(CurOp++);
       bool IsExtReg =
         X86InstrInfo::isX86_64ExtendedReg(MO.getReg());
@@ -1017,7 +1017,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS,
     }
   }
 
-  if ((TSFlags >> 32) & X86II::Has3DNow0F0FOpcode)
+  if ((TSFlags >> X86II::VEXShift) & X86II::Has3DNow0F0FOpcode)
     EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);
 
 