Fix the last crimes against nature that used the 'ir' ordering to use the

'ri' ordering instead... no, it's not possible to store a register into an
immediate!


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@11529 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Chris Lattner 2004-02-17 06:24:02 +00:00
parent f120ebbf8a
commit 7ddc3fbd22
5 changed files with 47 additions and 47 deletions

View File

@@ -1233,7 +1233,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
} else {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(1);
BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(1);
}
Opcode = X86::REP_MOVSW;
break;
@@ -1242,7 +1242,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
} else {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(2);
BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(2);
}
Opcode = X86::REP_MOVSD;
break;
@@ -1285,7 +1285,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
} else {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(1);
BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(1);
}
BuildMI(BB, X86::MOVri16, 1, X86::AX).addZImm((Val << 8) | Val);
Opcode = X86::REP_STOSW;
@@ -1295,7 +1295,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
} else {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(2);
BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(2);
}
Val = (Val << 8) | Val;
BuildMI(BB, X86::MOVri32, 1, X86::EAX).addZImm((Val << 16) | Val);
@@ -1512,13 +1512,13 @@ void ISel::doMultiplyConst(MachineBasicBlock *MBB,
switch (Class) {
default: assert(0 && "Unknown class for this function!");
case cByte:
BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
return;
case cShort:
BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
return;
case cInt:
BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
return;
}
}
@@ -1646,7 +1646,7 @@ void ISel::emitDivRemOperation(MachineBasicBlock *BB,
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
static const unsigned SarOpcode[]={ X86::SARri8, X86::SARri16, X86::SARri32 };
static const unsigned ClrOpcode[]={ X86::MOVri8, X86::MOVri16, X86::MOVri32 };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
@@ -1706,10 +1706,10 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
unsigned Class = getClass (ResultTy);
static const unsigned ConstantOperand[][4] = {
{ X86::SHRir8, X86::SHRir16, X86::SHRir32, X86::SHRDir32 }, // SHR
{ X86::SARir8, X86::SARir16, X86::SARir32, X86::SHRDir32 }, // SAR
{ X86::SHLir8, X86::SHLir16, X86::SHLir32, X86::SHLDir32 }, // SHL
{ X86::SHLir8, X86::SHLir16, X86::SHLir32, X86::SHLDir32 }, // SAL = SHL
{ X86::SHRri8, X86::SHRri16, X86::SHRri32, X86::SHRDri32 }, // SHR
{ X86::SARri8, X86::SARri16, X86::SARri32, X86::SHRDri32 }, // SAR
{ X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDri32 }, // SHL
{ X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDri32 }, // SAL = SHL
};
static const unsigned NonConstantOperand[][4] = {
@@ -1740,12 +1740,12 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
} else { // Shifting more than 32 bits
Amount -= 32;
if (isLeftShift) {
BMI(MBB, IP, X86::SHLir32, 2,
BMI(MBB, IP, X86::SHLri32, 2,
DestReg + 1).addReg(SrcReg).addZImm(Amount);
BMI(MBB, IP, X86::MOVri32, 1,
DestReg).addZImm(0);
} else {
unsigned Opcode = isSigned ? X86::SARir32 : X86::SHRir32;
unsigned Opcode = isSigned ? X86::SARri32 : X86::SHRri32;
BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
BMI(MBB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
}
@@ -1757,7 +1757,7 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
// If this is a SHR of a Long, then we need to do funny sign extension
// stuff. TmpReg gets the value to use as the high-part if we are
// shifting more than 32 bits.
BMI(MBB, IP, X86::SARir32, 2, TmpReg).addReg(SrcReg).addZImm(31);
BMI(MBB, IP, X86::SARri32, 2, TmpReg).addReg(SrcReg).addZImm(31);
} else {
// Other shifts use a fixed zero value if the shift is more than 32
// bits.
@@ -1991,7 +1991,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
if (isUnsigned) // Zero out top bits...
BMI(BB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
else // Sign extend bottom half...
BMI(BB, IP, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
BMI(BB, IP, X86::SARri32, 2, DestReg+1).addReg(DestReg).addZImm(31);
}
return;
}

View File

@@ -395,7 +395,7 @@ bool SSAPH::OptimizeAddress(MachineInstr *MI, unsigned OpNo) {
}
break;
case X86::SHLir32:
case X86::SHLri32:
// If this shift could be folded into the index portion of the address if
// it were the index register, move it to the index register operand now,
// so it will be folded in below.
@@ -413,7 +413,7 @@ bool SSAPH::OptimizeAddress(MachineInstr *MI, unsigned OpNo) {
// Attempt to fold instructions used by the index into the instruction
if (MachineInstr *DefInst = getDefiningInst(IndexRegOp)) {
switch (DefInst->getOpcode()) {
case X86::SHLir32: {
case X86::SHLri32: {
// Figure out what the resulting scale would be if we folded this shift.
unsigned ResScale = Scale * (1 << DefInst->getOperand(2).getImmedValue());
if (isValidScaleAmount(ResScale)) {

View File

@@ -1233,7 +1233,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
} else {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(1);
BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(1);
}
Opcode = X86::REP_MOVSW;
break;
@@ -1242,7 +1242,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg = getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
} else {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(2);
BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(2);
}
Opcode = X86::REP_MOVSD;
break;
@@ -1285,7 +1285,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/2));
} else {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(1);
BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(1);
}
BuildMI(BB, X86::MOVri16, 1, X86::AX).addZImm((Val << 8) | Val);
Opcode = X86::REP_STOSW;
@@ -1295,7 +1295,7 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
CountReg =getReg(ConstantUInt::get(Type::UIntTy, I->getRawValue()/4));
} else {
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(2);
BuildMI(BB, X86::SHRri32, 2, CountReg).addReg(ByteReg).addZImm(2);
}
Val = (Val << 8) | Val;
BuildMI(BB, X86::MOVri32, 1, X86::EAX).addZImm((Val << 16) | Val);
@@ -1512,13 +1512,13 @@ void ISel::doMultiplyConst(MachineBasicBlock *MBB,
switch (Class) {
default: assert(0 && "Unknown class for this function!");
case cByte:
BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
return;
case cShort:
BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
return;
case cInt:
BMI(MBB, IP, X86::SHLir32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
BMI(MBB, IP, X86::SHLri32, 2, DestReg).addReg(op0Reg).addZImm(Shift-1);
return;
}
}
@@ -1646,7 +1646,7 @@ void ISel::emitDivRemOperation(MachineBasicBlock *BB,
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
static const unsigned SarOpcode[]={ X86::SARri8, X86::SARri16, X86::SARri32 };
static const unsigned ClrOpcode[]={ X86::MOVri8, X86::MOVri16, X86::MOVri32 };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
@@ -1706,10 +1706,10 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
unsigned Class = getClass (ResultTy);
static const unsigned ConstantOperand[][4] = {
{ X86::SHRir8, X86::SHRir16, X86::SHRir32, X86::SHRDir32 }, // SHR
{ X86::SARir8, X86::SARir16, X86::SARir32, X86::SHRDir32 }, // SAR
{ X86::SHLir8, X86::SHLir16, X86::SHLir32, X86::SHLDir32 }, // SHL
{ X86::SHLir8, X86::SHLir16, X86::SHLir32, X86::SHLDir32 }, // SAL = SHL
{ X86::SHRri8, X86::SHRri16, X86::SHRri32, X86::SHRDri32 }, // SHR
{ X86::SARri8, X86::SARri16, X86::SARri32, X86::SHRDri32 }, // SAR
{ X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDri32 }, // SHL
{ X86::SHLri8, X86::SHLri16, X86::SHLri32, X86::SHLDri32 }, // SAL = SHL
};
static const unsigned NonConstantOperand[][4] = {
@@ -1740,12 +1740,12 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
} else { // Shifting more than 32 bits
Amount -= 32;
if (isLeftShift) {
BMI(MBB, IP, X86::SHLir32, 2,
BMI(MBB, IP, X86::SHLri32, 2,
DestReg + 1).addReg(SrcReg).addZImm(Amount);
BMI(MBB, IP, X86::MOVri32, 1,
DestReg).addZImm(0);
} else {
unsigned Opcode = isSigned ? X86::SARir32 : X86::SHRir32;
unsigned Opcode = isSigned ? X86::SARri32 : X86::SHRri32;
BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
BMI(MBB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
}
@@ -1757,7 +1757,7 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
// If this is a SHR of a Long, then we need to do funny sign extension
// stuff. TmpReg gets the value to use as the high-part if we are
// shifting more than 32 bits.
BMI(MBB, IP, X86::SARir32, 2, TmpReg).addReg(SrcReg).addZImm(31);
BMI(MBB, IP, X86::SARri32, 2, TmpReg).addReg(SrcReg).addZImm(31);
} else {
// Other shifts use a fixed zero value if the shift is more than 32
// bits.
@@ -1991,7 +1991,7 @@ void ISel::emitCastOperation(MachineBasicBlock *BB,
if (isUnsigned) // Zero out top bits...
BMI(BB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
else // Sign extend bottom half...
BMI(BB, IP, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
BMI(BB, IP, X86::SARri32, 2, DestReg+1).addReg(DestReg).addZImm(31);
}
return;
}

View File

@@ -373,26 +373,26 @@ class UsesCL { list<Register> Uses = [CL]; bit printImplicitUses = 1; }
def SHLrr8 : I2A8 <"shl", 0xD2, MRMS4r > , UsesCL; // R8 <<= cl
def SHLrr16 : I2A8 <"shl", 0xD3, MRMS4r >, OpSize, UsesCL; // R16 <<= cl
def SHLrr32 : I2A8 <"shl", 0xD3, MRMS4r > , UsesCL; // R32 <<= cl
def SHLir8 : I2A8 <"shl", 0xC0, MRMS4r >; // R8 <<= imm8
def SHLir16 : I2A8 <"shl", 0xC1, MRMS4r >, OpSize; // R16 <<= imm16
def SHLir32 : I2A8 <"shl", 0xC1, MRMS4r >; // R32 <<= imm32
def SHLri8 : I2A8 <"shl", 0xC0, MRMS4r >; // R8 <<= imm8
def SHLri16 : I2A8 <"shl", 0xC1, MRMS4r >, OpSize; // R16 <<= imm16
def SHLri32 : I2A8 <"shl", 0xC1, MRMS4r >; // R32 <<= imm32
def SHRrr8 : I2A8 <"shr", 0xD2, MRMS5r > , UsesCL; // R8 >>= cl
def SHRrr16 : I2A8 <"shr", 0xD3, MRMS5r >, OpSize, UsesCL; // R16 >>= cl
def SHRrr32 : I2A8 <"shr", 0xD3, MRMS5r > , UsesCL; // R32 >>= cl
def SHRir8 : I2A8 <"shr", 0xC0, MRMS5r >; // R8 >>= imm8
def SHRir16 : I2A8 <"shr", 0xC1, MRMS5r >, OpSize; // R16 >>= imm16
def SHRir32 : I2A8 <"shr", 0xC1, MRMS5r >; // R32 >>= imm32
def SHRri8 : I2A8 <"shr", 0xC0, MRMS5r >; // R8 >>= imm8
def SHRri16 : I2A8 <"shr", 0xC1, MRMS5r >, OpSize; // R16 >>= imm16
def SHRri32 : I2A8 <"shr", 0xC1, MRMS5r >; // R32 >>= imm32
def SARrr8 : I2A8 <"sar", 0xD2, MRMS7r > , UsesCL; // R8 >>>= cl
def SARrr16 : I2A8 <"sar", 0xD3, MRMS7r >, OpSize, UsesCL; // R16 >>>= cl
def SARrr32 : I2A8 <"sar", 0xD3, MRMS7r > , UsesCL; // R32 >>>= cl
def SARir8 : I2A8 <"sar", 0xC0, MRMS7r >; // R8 >>>= imm8
def SARir16 : I2A8 <"sar", 0xC1, MRMS7r >, OpSize; // R16 >>>= imm16
def SARir32 : I2A8 <"sar", 0xC1, MRMS7r >; // R32 >>>= imm32
def SARri8 : I2A8 <"sar", 0xC0, MRMS7r >; // R8 >>>= imm8
def SARri16 : I2A8 <"sar", 0xC1, MRMS7r >, OpSize; // R16 >>>= imm16
def SARri32 : I2A8 <"sar", 0xC1, MRMS7r >; // R32 >>>= imm32
def SHLDrr32 : I2A8 <"shld", 0xA5, MRMDestReg>, TB, UsesCL; // R32 <<= R32,R32 cl
def SHLDir32 : I2A8 <"shld", 0xA4, MRMDestReg>, TB; // R32 <<= R32,R32 imm8
def SHLDri32 : I2A8 <"shld", 0xA4, MRMDestReg>, TB; // R32 <<= R32,R32 imm8
def SHRDrr32 : I2A8 <"shrd", 0xAD, MRMDestReg>, TB, UsesCL; // R32 >>= R32,R32 cl
def SHRDir32 : I2A8 <"shrd", 0xAC, MRMDestReg>, TB; // R32 >>= R32,R32 imm8
def SHRDri32 : I2A8 <"shrd", 0xAC, MRMDestReg>, TB; // R32 >>= R32,R32 imm8
// Condition code ops, incl. set if equal/not equal/...
def SAHF : X86Inst<"sahf" , 0x9E, RawFrm, Arg8>, Imp<[AH],[]>; // flags = AH

View File

@@ -395,7 +395,7 @@ bool SSAPH::OptimizeAddress(MachineInstr *MI, unsigned OpNo) {
}
break;
case X86::SHLir32:
case X86::SHLri32:
// If this shift could be folded into the index portion of the address if
// it were the index register, move it to the index register operand now,
// so it will be folded in below.
@@ -413,7 +413,7 @@ bool SSAPH::OptimizeAddress(MachineInstr *MI, unsigned OpNo) {
// Attempt to fold instructions used by the index into the instruction
if (MachineInstr *DefInst = getDefiningInst(IndexRegOp)) {
switch (DefInst->getOpcode()) {
case X86::SHLir32: {
case X86::SHLri32: {
// Figure out what the resulting scale would be if we folded this shift.
unsigned ResScale = Scale * (1 << DefInst->getOperand(2).getImmedValue());
if (isValidScaleAmount(ResScale)) {