[SystemZ] Define the GR64 low-word logic instructions as pseudo aliases.

Another patch to avoid duplication of encoding information.  Instructions
such as NILF, NILL and NILH are used as both 32-bit and 64-bit operations;
here the 64-bit versions are defined as pseudo aliases of the 32-bit ones,
so only the 32-bit definitions carry the encoding.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@191369 91177308-0d34-0410-b5e6-96231b3b80d8
Richard Sandiford 2013-09-25 11:11:53 +00:00
parent 3f22cc1df6
commit 259a6006e8
8 changed files with 160 additions and 111 deletions
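
A minimal sketch of the effect at the assembly level (register and immediate
chosen purely for illustration): a low-word logic operation reads and encodes
the same way whether it acts on a GR32 or on the low word of a GR64, so only
the 32-bit definition needs to carry the encoding.

    nill    %r2, 1    # 32-bit use: the GR32 def NILL (RI format, opcode 0xA57)
    nill    %r2, 1    # 64-bit use: previously a second real def with the same
                      # encoding; now the pseudo NILL64, lowered by the asm
                      # printer onto the GR32 NILL above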


@@ -56,6 +56,11 @@ namespace SystemZMC {
   inline unsigned getRegAsGR64(unsigned Reg) {
     return GR64Regs[getFirstReg(Reg)];
   }
+
+  // Return the given register as a low GR32.
+  inline unsigned getRegAsGR32(unsigned Reg) {
+    return GR32Regs[getFirstReg(Reg)];
+  }
 }
 
 MCCodeEmitter *createSystemZMCCodeEmitter(const MCInstrInfo &MCII,
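
This mirrors the existing getRegAsGR64 helper above: both index parallel
register tables keyed by getFirstReg.  The asm-printer changes below rely on
being able to pass a 64-bit register here; roughly (register name illustrative):

    unsigned Low32 = SystemZMC::getRegAsGR32(SystemZ::R2D);  // low half of %r2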


@@ -26,6 +26,15 @@
 using namespace llvm;
 
+// Return an RI instruction like MI with opcode Opcode, but with the
+// GR64 register operands turned into GR32s.
+static MCInst lowerRILow(const MachineInstr *MI, unsigned Opcode) {
+  return MCInstBuilder(Opcode)
+    .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+    .addReg(SystemZMC::getRegAsGR32(MI->getOperand(1).getReg()))
+    .addImm(MI->getOperand(2).getImm());
+}
+
 void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
   SystemZMCInstLower Lower(Mang, MF->getContext(), *this);
   MCInst LoweredMI;
@@ -55,6 +64,27 @@ void SystemZAsmPrinter::EmitInstruction(const MachineInstr *MI) {
     LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R1D);
     break;
 
+  case SystemZ::IILF64:
+    LoweredMI = MCInstBuilder(SystemZ::IILF)
+      .addReg(SystemZMC::getRegAsGR32(MI->getOperand(0).getReg()))
+      .addImm(MI->getOperand(2).getImm());
+    break;
+
+#define LOWER_LOW(NAME) \
+  case SystemZ::NAME##64: LoweredMI = lowerRILow(MI, SystemZ::NAME); break
+
+  LOWER_LOW(IILL);
+  LOWER_LOW(IILH);
+  LOWER_LOW(NILL);
+  LOWER_LOW(NILH);
+  LOWER_LOW(NILF);
+  LOWER_LOW(OILL);
+  LOWER_LOW(OILH);
+  LOWER_LOW(OILF);
+  LOWER_LOW(XILF);
+
+#undef LOWER_LOW
+
   default:
     Lower.lower(MI, LoweredMI);
     break;
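
IILF64 needs its own case rather than LOWER_LOW because the 32-bit IILF is
defined in the .td changes below as a full-width move (a UnaryRIL), so its
lowered MCInst carries no second register operand.  A rough sketch of the two
lowering shapes, with low32() standing in for SystemZMC::getRegAsGR32():

    NILL64 dst64, src64, imm  ->  NILL low32(dst64), low32(src64), imm   (via lowerRILow)
    IILF64 dst64, src64, imm  ->  IILF low32(dst64), imm                 (hand-written case)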


@@ -2330,11 +2330,11 @@ SystemZTargetLowering::emitAtomicLoadBinary(MachineInstr *MI,
       .addReg(RotatedOldVal).addOperand(Src2);
   if (BitSize < 32)
     // XILF with the upper BitSize bits set.
-    BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
+    BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
       .addReg(Tmp).addImm(uint32_t(~0 << (32 - BitSize)));
   else if (BitSize == 32)
     // XILF with every bit set.
-    BuildMI(MBB, DL, TII->get(SystemZ::XILF32), RotatedNewVal)
+    BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
       .addReg(Tmp).addImm(~uint32_t(0));
   else {
     // Use LCGR and add -1 to the result, which is more compact than
@@ -2938,96 +2938,96 @@ EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const {
   case SystemZ::ATOMIC_LOADW_NR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
   case SystemZ::ATOMIC_LOADW_NILH:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
   case SystemZ::ATOMIC_LOAD_NR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
-  case SystemZ::ATOMIC_LOAD_NILL32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32);
-  case SystemZ::ATOMIC_LOAD_NILH32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32);
-  case SystemZ::ATOMIC_LOAD_NILF32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32);
+  case SystemZ::ATOMIC_LOAD_NILL:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
+  case SystemZ::ATOMIC_LOAD_NILH:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
+  case SystemZ::ATOMIC_LOAD_NILF:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
   case SystemZ::ATOMIC_LOAD_NGR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
-  case SystemZ::ATOMIC_LOAD_NILL:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64);
-  case SystemZ::ATOMIC_LOAD_NILH:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64);
+  case SystemZ::ATOMIC_LOAD_NILL64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
+  case SystemZ::ATOMIC_LOAD_NILH64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
   case SystemZ::ATOMIC_LOAD_NIHL:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64);
   case SystemZ::ATOMIC_LOAD_NIHH:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64);
-  case SystemZ::ATOMIC_LOAD_NILF:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64);
+  case SystemZ::ATOMIC_LOAD_NILF64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
   case SystemZ::ATOMIC_LOAD_NIHF:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64);
 
   case SystemZ::ATOMIC_LOADW_OR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 0);
   case SystemZ::ATOMIC_LOADW_OILH:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 0);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
   case SystemZ::ATOMIC_LOAD_OR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OR, 32);
-  case SystemZ::ATOMIC_LOAD_OILL32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL32, 32);
-  case SystemZ::ATOMIC_LOAD_OILH32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH32, 32);
-  case SystemZ::ATOMIC_LOAD_OILF32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF32, 32);
+  case SystemZ::ATOMIC_LOAD_OILL:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
+  case SystemZ::ATOMIC_LOAD_OILH:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
+  case SystemZ::ATOMIC_LOAD_OILF:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
   case SystemZ::ATOMIC_LOAD_OGR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
-  case SystemZ::ATOMIC_LOAD_OILL:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 64);
-  case SystemZ::ATOMIC_LOAD_OILH:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 64);
+  case SystemZ::ATOMIC_LOAD_OILL64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
+  case SystemZ::ATOMIC_LOAD_OILH64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
   case SystemZ::ATOMIC_LOAD_OIHL:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL, 64);
   case SystemZ::ATOMIC_LOAD_OIHH:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH, 64);
-  case SystemZ::ATOMIC_LOAD_OILF:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 64);
+  case SystemZ::ATOMIC_LOAD_OILF64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
   case SystemZ::ATOMIC_LOAD_OIHF:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF, 64);
 
   case SystemZ::ATOMIC_LOADW_XR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
   case SystemZ::ATOMIC_LOADW_XILF:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 0);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
   case SystemZ::ATOMIC_LOAD_XR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
-  case SystemZ::ATOMIC_LOAD_XILF32:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF32, 32);
+  case SystemZ::ATOMIC_LOAD_XILF:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
   case SystemZ::ATOMIC_LOAD_XGR:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
-  case SystemZ::ATOMIC_LOAD_XILF:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 64);
+  case SystemZ::ATOMIC_LOAD_XILF64:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
   case SystemZ::ATOMIC_LOAD_XIHF:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF, 64);
 
   case SystemZ::ATOMIC_LOADW_NRi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0, true);
   case SystemZ::ATOMIC_LOADW_NILHi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 0, true);
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0, true);
   case SystemZ::ATOMIC_LOAD_NRi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32, true);
-  case SystemZ::ATOMIC_LOAD_NILL32i:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL32, 32, true);
-  case SystemZ::ATOMIC_LOAD_NILH32i:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH32, 32, true);
-  case SystemZ::ATOMIC_LOAD_NILF32i:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF32, 32, true);
+  case SystemZ::ATOMIC_LOAD_NILLi:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32, true);
+  case SystemZ::ATOMIC_LOAD_NILHi:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32, true);
+  case SystemZ::ATOMIC_LOAD_NILFi:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32, true);
   case SystemZ::ATOMIC_LOAD_NGRi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64, true);
-  case SystemZ::ATOMIC_LOAD_NILLi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 64, true);
-  case SystemZ::ATOMIC_LOAD_NILHi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 64, true);
+  case SystemZ::ATOMIC_LOAD_NILL64i:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64, true);
+  case SystemZ::ATOMIC_LOAD_NILH64i:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64, true);
   case SystemZ::ATOMIC_LOAD_NIHLi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL, 64, true);
   case SystemZ::ATOMIC_LOAD_NIHHi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH, 64, true);
-  case SystemZ::ATOMIC_LOAD_NILFi:
-    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 64, true);
+  case SystemZ::ATOMIC_LOAD_NILF64i:
+    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64, true);
   case SystemZ::ATOMIC_LOAD_NIHFi:
     return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF, 64, true);


@@ -1477,3 +1477,19 @@ class Alias<int size, dag outs, dag ins, list<dag> pattern>
   let isPseudo = 1;
   let isCodeGenOnly = 1;
 }
+
+// An alias of a BinaryRI, but with different register sizes.
+class BinaryAliasRI<SDPatternOperator operator, RegisterOperand cls,
+                    Immediate imm>
+  : Alias<4, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+          [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+  let Constraints = "$R1 = $R1src";
+}
+
+// An alias of a BinaryRIL, but with different register sizes.
+class BinaryAliasRIL<SDPatternOperator operator, RegisterOperand cls,
+                     Immediate imm>
+  : Alias<6, (outs cls:$R1), (ins cls:$R1src, imm:$I2),
+          [(set cls:$R1, (operator cls:$R1src, imm:$I2))]> {
+  let Constraints = "$R1 = $R1src";
+}


@@ -535,14 +535,14 @@ namespace {
 static LogicOp interpretAndImmediate(unsigned Opcode) {
   switch (Opcode) {
-  case SystemZ::NILL32: return LogicOp(32, 0, 16);
-  case SystemZ::NILH32: return LogicOp(32, 16, 16);
-  case SystemZ::NILL:   return LogicOp(64, 0, 16);
-  case SystemZ::NILH:   return LogicOp(64, 16, 16);
+  case SystemZ::NILL:   return LogicOp(32, 0, 16);
+  case SystemZ::NILH:   return LogicOp(32, 16, 16);
+  case SystemZ::NILL64: return LogicOp(64, 0, 16);
+  case SystemZ::NILH64: return LogicOp(64, 16, 16);
   case SystemZ::NIHL:   return LogicOp(64, 32, 16);
   case SystemZ::NIHH:   return LogicOp(64, 48, 16);
-  case SystemZ::NILF32: return LogicOp(32, 0, 32);
-  case SystemZ::NILF:   return LogicOp(64, 0, 32);
+  case SystemZ::NILF:   return LogicOp(32, 0, 32);
+  case SystemZ::NILF64: return LogicOp(64, 0, 32);
   case SystemZ::NIHF:   return LogicOp(64, 32, 32);
   default:              return LogicOp();
   }


@@ -570,12 +570,10 @@ defm : InsertMem<"inserti8", ICY, GR64, azextloadi8, bdxaddr20pair>;
 // Insertions of a 16-bit immediate, leaving other bits unaffected.
 // We don't have or_as_insert equivalents of these operations because
 // OI is available instead.
-let isCodeGenOnly = 1 in {
-  def IILL32 : BinaryRI<"iill", 0xA53, insertll, GR32, imm32ll16>;
-  def IILH32 : BinaryRI<"iilh", 0xA52, insertlh, GR32, imm32lh16>;
-}
-def IILL : BinaryRI<"iill", 0xA53, insertll, GR64, imm64ll16>;
-def IILH : BinaryRI<"iilh", 0xA52, insertlh, GR64, imm64lh16>;
+def IILL : BinaryRI<"iill", 0xA53, insertll, GR32, imm32ll16>;
+def IILH : BinaryRI<"iilh", 0xA52, insertlh, GR32, imm32lh16>;
+def IILL64 : BinaryAliasRI<insertll, GR64, imm64ll16>;
+def IILH64 : BinaryAliasRI<insertlh, GR64, imm64lh16>;
 def IIHL : BinaryRI<"iihl", 0xA51, inserthl, GR64, imm64hl16>;
 def IIHH : BinaryRI<"iihh", 0xA50, inserthh, GR64, imm64hh16>;
@@ -583,11 +581,9 @@ def IIHH : BinaryRI<"iihh", 0xA50, inserthh, GR64, imm64hh16>;
 // full-width move.  (We use IILF rather than something like LLILF
 // for 32-bit moves because IILF leaves the upper 32 bits of the
 // GR64 unchanged.)
-let isCodeGenOnly = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
-    isReMaterializable = 1 in {
-  def IILF32 : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>;
-}
-def IILF : BinaryRIL<"iilf", 0xC09, insertlf, GR64, imm64lf32>;
+let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in
+  def IILF : UnaryRIL<"iilf", 0xC09, bitconvert, GR32, uimm32>;
+def IILF64 : BinaryAliasRIL<insertlf, GR64, imm64lf32>;
 def IIHF : BinaryRIL<"iihf", 0xC08, inserthf, GR64, imm64hf32>;
 
 // An alternative model of inserthf, with the first operand being
@@ -730,21 +726,19 @@ let Defs = [CC] in {
   let isConvertibleToThreeAddress = 1 in {
     // ANDs of a 16-bit immediate, leaving other bits unaffected.
     // The CC result only reflects the 16-bit field, not the full register.
-    let isCodeGenOnly = 1 in {
-      def NILL32 : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;
-      def NILH32 : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>;
-    }
-    def NILL : BinaryRI<"nill", 0xA57, and, GR64, imm64ll16c>;
-    def NILH : BinaryRI<"nilh", 0xA56, and, GR64, imm64lh16c>;
+    def NILL : BinaryRI<"nill", 0xA57, and, GR32, imm32ll16c>;
+    def NILH : BinaryRI<"nilh", 0xA56, and, GR32, imm32lh16c>;
+    def NILL64 : BinaryAliasRI<and, GR64, imm64ll16c>;
+    def NILH64 : BinaryAliasRI<and, GR64, imm64lh16c>;
     def NIHL : BinaryRI<"nihl", 0xA55, and, GR64, imm64hl16c>;
     def NIHH : BinaryRI<"nihh", 0xA54, and, GR64, imm64hh16c>;
 
     // ANDs of a 32-bit immediate, leaving other bits unaffected.
     // The CC result only reflects the 32-bit field, which means we can
    // use it as a zero indicator for i32 operations but not otherwise.
-    let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in
-      def NILF32 : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>;
-    def NILF : BinaryRIL<"nilf", 0xC0B, and, GR64, imm64lf32c>;
+    let CCValues = 0xC, CompareZeroCCMask = 0x8 in
+      def NILF : BinaryRIL<"nilf", 0xC0B, and, GR32, uimm32>;
+    def NILF64 : BinaryAliasRIL<and, GR64, imm64lf32c>;
     def NIHF : BinaryRIL<"nihf", 0xC0A, and, GR64, imm64hf32c>;
   }
@@ -777,21 +771,19 @@ let Defs = [CC] in {
   // ORs of a 16-bit immediate, leaving other bits unaffected.
   // The CC result only reflects the 16-bit field, not the full register.
-  let isCodeGenOnly = 1 in {
-    def OILL32 : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>;
-    def OILH32 : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>;
-  }
-  def OILL : BinaryRI<"oill", 0xA5B, or, GR64, imm64ll16>;
-  def OILH : BinaryRI<"oilh", 0xA5A, or, GR64, imm64lh16>;
+  def OILL : BinaryRI<"oill", 0xA5B, or, GR32, imm32ll16>;
+  def OILH : BinaryRI<"oilh", 0xA5A, or, GR32, imm32lh16>;
+  def OILL64 : BinaryAliasRI<or, GR64, imm64ll16>;
+  def OILH64 : BinaryAliasRI<or, GR64, imm64lh16>;
   def OIHL : BinaryRI<"oihl", 0xA59, or, GR64, imm64hl16>;
   def OIHH : BinaryRI<"oihh", 0xA58, or, GR64, imm64hh16>;
 
   // ORs of a 32-bit immediate, leaving other bits unaffected.
   // The CC result only reflects the 32-bit field, which means we can
   // use it as a zero indicator for i32 operations but not otherwise.
-  let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in
-    def OILF32 : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>;
-  def OILF : BinaryRIL<"oilf", 0xC0D, or, GR64, imm64lf32>;
+  let CCValues = 0xC, CompareZeroCCMask = 0x8 in
+    def OILF : BinaryRIL<"oilf", 0xC0D, or, GR32, uimm32>;
+  def OILF64 : BinaryAliasRIL<or, GR64, imm64lf32>;
   def OIHF : BinaryRIL<"oihf", 0xC0C, or, GR64, imm64hf32>;
 
   // ORs of memory.
@@ -824,9 +816,9 @@ let Defs = [CC] in {
   // XORs of a 32-bit immediate, leaving other bits unaffected.
   // The CC result only reflects the 32-bit field, which means we can
   // use it as a zero indicator for i32 operations but not otherwise.
-  let isCodeGenOnly = 1, CCValues = 0xC, CompareZeroCCMask = 0x8 in
-    def XILF32 : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>;
-  def XILF : BinaryRIL<"xilf", 0xC07, xor, GR64, imm64lf32>;
+  let CCValues = 0xC, CompareZeroCCMask = 0x8 in
+    def XILF : BinaryRIL<"xilf", 0xC07, xor, GR32, uimm32>;
+  def XILF64 : BinaryAliasRIL<xor, GR64, imm64lf32>;
   def XIHF : BinaryRIL<"xihf", 0xC06, xor, GR64, imm64hf32>;
 
   // XORs of memory.
@@ -1036,18 +1028,16 @@ let mayLoad = 1, Defs = [CC], Uses = [R0W] in
 // Test under mask.
 let Defs = [CC] in {
-  let isCodeGenOnly = 1 in {
-    def TMLL32 : CompareRI<"tmll", 0xA71, z_tm_reg, GR32, imm32ll16>;
-    def TMLH32 : CompareRI<"tmlh", 0xA70, z_tm_reg, GR32, imm32lh16>;
-  }
-  def TMLL : CompareRI<"tmll", 0xA71, z_tm_reg, GR64, imm64ll16>;
-  def TMLH : CompareRI<"tmlh", 0xA70, z_tm_reg, GR64, imm64lh16>;
+  def TMLL : CompareRI<"tmll", 0xA71, z_tm_reg, GR32, imm32ll16>;
+  def TMLH : CompareRI<"tmlh", 0xA70, z_tm_reg, GR32, imm32lh16>;
   def TMHL : CompareRI<"tmhl", 0xA73, z_tm_reg, GR64, imm64hl16>;
   def TMHH : CompareRI<"tmhh", 0xA72, z_tm_reg, GR64, imm64hh16>;
 
   defm TM : CompareSIPair<"tm", 0x91, 0xEB51, z_tm_mem, anyextloadi8, imm32zx8>;
 }
+
+def : CompareGR64RI<TMLL, z_tm_reg, imm64ll16>;
+def : CompareGR64RI<TMLH, z_tm_reg, imm64lh16>;
 
 //===----------------------------------------------------------------------===//
 // Prefetch
@@ -1080,58 +1070,58 @@ def ATOMIC_LOAD_SGR : AtomicLoadBinaryReg64<atomic_load_sub_64>;
 def ATOMIC_LOADW_NR : AtomicLoadWBinaryReg<z_atomic_loadw_and>;
 def ATOMIC_LOADW_NILH : AtomicLoadWBinaryImm<z_atomic_loadw_and, imm32lh16c>;
 def ATOMIC_LOAD_NR : AtomicLoadBinaryReg32<atomic_load_and_32>;
-def ATOMIC_LOAD_NILL32 : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
-def ATOMIC_LOAD_NILH32 : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
-def ATOMIC_LOAD_NILF32 : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
+def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm32<atomic_load_and_32, imm32ll16c>;
+def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm32<atomic_load_and_32, imm32lh16c>;
+def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm32<atomic_load_and_32, uimm32>;
 def ATOMIC_LOAD_NGR : AtomicLoadBinaryReg64<atomic_load_and_64>;
-def ATOMIC_LOAD_NILL : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
-def ATOMIC_LOAD_NILH : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
+def ATOMIC_LOAD_NILL64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64ll16c>;
+def ATOMIC_LOAD_NILH64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lh16c>;
 def ATOMIC_LOAD_NIHL : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hl16c>;
 def ATOMIC_LOAD_NIHH : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hh16c>;
-def ATOMIC_LOAD_NILF : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
+def ATOMIC_LOAD_NILF64 : AtomicLoadBinaryImm64<atomic_load_and_64, imm64lf32c>;
 def ATOMIC_LOAD_NIHF : AtomicLoadBinaryImm64<atomic_load_and_64, imm64hf32c>;
 
 def ATOMIC_LOADW_OR : AtomicLoadWBinaryReg<z_atomic_loadw_or>;
 def ATOMIC_LOADW_OILH : AtomicLoadWBinaryImm<z_atomic_loadw_or, imm32lh16>;
 def ATOMIC_LOAD_OR : AtomicLoadBinaryReg32<atomic_load_or_32>;
-def ATOMIC_LOAD_OILL32 : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
-def ATOMIC_LOAD_OILH32 : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
-def ATOMIC_LOAD_OILF32 : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
+def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm32<atomic_load_or_32, imm32ll16>;
+def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm32<atomic_load_or_32, imm32lh16>;
+def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm32<atomic_load_or_32, uimm32>;
 def ATOMIC_LOAD_OGR : AtomicLoadBinaryReg64<atomic_load_or_64>;
-def ATOMIC_LOAD_OILL : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
-def ATOMIC_LOAD_OILH : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
+def ATOMIC_LOAD_OILL64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64ll16>;
+def ATOMIC_LOAD_OILH64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lh16>;
 def ATOMIC_LOAD_OIHL : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hl16>;
 def ATOMIC_LOAD_OIHH : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hh16>;
-def ATOMIC_LOAD_OILF : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
+def ATOMIC_LOAD_OILF64 : AtomicLoadBinaryImm64<atomic_load_or_64, imm64lf32>;
 def ATOMIC_LOAD_OIHF : AtomicLoadBinaryImm64<atomic_load_or_64, imm64hf32>;
 
 def ATOMIC_LOADW_XR : AtomicLoadWBinaryReg<z_atomic_loadw_xor>;
 def ATOMIC_LOADW_XILF : AtomicLoadWBinaryImm<z_atomic_loadw_xor, uimm32>;
 def ATOMIC_LOAD_XR : AtomicLoadBinaryReg32<atomic_load_xor_32>;
-def ATOMIC_LOAD_XILF32 : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
+def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm32<atomic_load_xor_32, uimm32>;
 def ATOMIC_LOAD_XGR : AtomicLoadBinaryReg64<atomic_load_xor_64>;
-def ATOMIC_LOAD_XILF : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
+def ATOMIC_LOAD_XILF64 : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64lf32>;
 def ATOMIC_LOAD_XIHF : AtomicLoadBinaryImm64<atomic_load_xor_64, imm64hf32>;
 
 def ATOMIC_LOADW_NRi : AtomicLoadWBinaryReg<z_atomic_loadw_nand>;
 def ATOMIC_LOADW_NILHi : AtomicLoadWBinaryImm<z_atomic_loadw_nand,
                                               imm32lh16c>;
 def ATOMIC_LOAD_NRi : AtomicLoadBinaryReg32<atomic_load_nand_32>;
-def ATOMIC_LOAD_NILL32i : AtomicLoadBinaryImm32<atomic_load_nand_32,
+def ATOMIC_LOAD_NILLi : AtomicLoadBinaryImm32<atomic_load_nand_32,
                                                 imm32ll16c>;
-def ATOMIC_LOAD_NILH32i : AtomicLoadBinaryImm32<atomic_load_nand_32,
+def ATOMIC_LOAD_NILHi : AtomicLoadBinaryImm32<atomic_load_nand_32,
                                                 imm32lh16c>;
-def ATOMIC_LOAD_NILF32i : AtomicLoadBinaryImm32<atomic_load_nand_32, uimm32>;
+def ATOMIC_LOAD_NILFi : AtomicLoadBinaryImm32<atomic_load_nand_32, uimm32>;
 def ATOMIC_LOAD_NGRi : AtomicLoadBinaryReg64<atomic_load_nand_64>;
-def ATOMIC_LOAD_NILLi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILL64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64ll16c>;
-def ATOMIC_LOAD_NILHi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILH64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64lh16c>;
 def ATOMIC_LOAD_NIHLi : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64hl16c>;
 def ATOMIC_LOAD_NIHHi : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64hh16c>;
-def ATOMIC_LOAD_NILFi : AtomicLoadBinaryImm64<atomic_load_nand_64,
+def ATOMIC_LOAD_NILF64i : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64lf32c>;
 def ATOMIC_LOAD_NIHFi : AtomicLoadBinaryImm64<atomic_load_nand_64,
                                               imm64hf32c>;


@@ -112,6 +112,14 @@ multiclass CondStores64<Instruction insn, Instruction insninv,
                           uimm8zx4:$valid, uimm8zx4:$cc)>;
 }
 
+// INSN performs a comparison between a 32-bit register and a constant.
+// Record that it is equivalent to comparing the low word of a GR64 with IMM.
+class CompareGR64RI<Instruction insn, SDPatternOperator compare,
+                    Immediate imm>
+  : Pat<(compare GR64:$R1, imm:$I2),
+        (insn (EXTRACT_SUBREG GR64:$R1, subreg_32bit),
+              (imm.OperandTransform imm:$I2))>;
+
 // Try to use MVC instruction INSN for a load of type LOAD followed by a store
 // of the same size.  VT is the type of the intermediate (legalized) value and
 // LENGTH is the number of bytes loaded by LOAD.


@@ -119,7 +119,7 @@ bool SystemZShortenInst::processBlock(MachineBasicBlock *MBB) {
        MBBE = MBB->rend(); MBBI != MBBE; ++MBBI) {
     MachineInstr &MI = *MBBI;
     unsigned Opcode = MI.getOpcode();
-    if (Opcode == SystemZ::IILF32)
+    if (Opcode == SystemZ::IILF)
       Changed |= shortenIIF(MI, LowGPRs, LiveHigh, SystemZ::LLILL,
                             SystemZ::LLILH);
 
     unsigned UsedLow = 0;