Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2025-02-20 14:29:27 +00:00)
Rearrange and refactor some code. No functionality changes.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@12203 91177308-0d34-0410-b5e6-96231b3b80d8

parent 9440db8866
commit 721d2d4a6e
@@ -199,8 +199,14 @@ namespace {
     ///
     void promote32(unsigned targetReg, const ValueRecord &VR);
 
-    // getGEPIndex - This is used to fold GEP instructions into X86 addressing
-    // expressions.
+    /// getAddressingMode - Get the addressing mode to use to address the
+    /// specified value.  The returned value should be used with addFullAddress.
+    void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
+                           unsigned &IndexReg, unsigned &Disp);
+
+
+    /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
+    /// expressions.
     void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
                      std::vector<Value*> &GEPOps,
                      std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
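For orientation: the (BaseReg, Scale, IndexReg, Disp) quadruple that getAddressingMode fills in describes the standard x86 memory-operand form Base + Scale*Index + Disp. A minimal standalone sketch of that arithmetic follows; the helper name and the example values are hypothetical, not from this file:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Models the address denoted by the four out-parameters of getAddressingMode.
// x86 can only encode scale factors of 1, 2, 4, or 8.
uint32_t effectiveAddress(uint32_t Base, unsigned Scale,
                          uint32_t Index, uint32_t Disp) {
  assert(Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8);
  return Base + Scale * Index + Disp;  // 32-bit wraparound, as on i386
}

int main() {
  // The degenerate mode (reg, 1, 0, 0) is just the pointer register itself:
  printf("0x%x\n", effectiveAddress(0x1000, 1, 0, 0));  // 0x1000
  // A folded GEP might become base + 4*index + 8:
  printf("0x%x\n", effectiveAddress(0x1000, 4, 3, 8));  // 0x1014
}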
@@ -1416,8 +1422,9 @@ void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) {
 void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
   unsigned DestReg = getReg(B);
   MachineBasicBlock::iterator MI = BB->end();
-  emitSimpleBinaryOperation(BB, MI, B.getOperand(0), B.getOperand(1),
-                            OperatorClass, DestReg);
+  Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
+
+  emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg);
 }
 
 /// emitSimpleBinaryOperation - Implement simple binary operators for integral
@@ -1459,83 +1466,83 @@ void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB,
         return;
       }
 
-  if (Class != cLong && isa<ConstantInt>(Op1)) {
-    ConstantInt *Op1C = cast<ConstantInt>(Op1);
-    unsigned Op0r = getReg(Op0, MBB, IP);
-
-    // xor X, -1 -> not X
-    if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
-      static unsigned const NOTTab[] = { X86::NOT8r, X86::NOT16r, X86::NOT32r };
-      BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
-      return;
-    }
-
-    // add X, -1 -> dec X
-    if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
-      static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
-      BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
-      return;
-    }
-
-    // add X, 1 -> inc X
-    if (OperatorClass == 0 && Op1C->equalsInt(1)) {
-      static unsigned const DECTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
-      BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
-      return;
-    }
-
-    static const unsigned OpcodeTab[][3] = {
-      // Arithmetic operators
-      { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri },  // ADD
-      { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri },  // SUB
-
-      // Bitwise operators
-      { X86::AND8ri, X86::AND16ri, X86::AND32ri },  // AND
-      { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri },  // OR
-      { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri },  // XOR
-    };
-
-    assert(Class < 3 && "General code handles 64-bit integer types!");
-    unsigned Opcode = OpcodeTab[OperatorClass][Class];
-    uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();
-
-    // Mask off any upper bits of the constant, if there are any...
-    Op1v &= (1ULL << (8 << Class)) - 1;
-    BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1v);
-    return;
-  }
-
-  // Finally, handle the general case now.
-  static const unsigned OpcodeTab[][4] = {
-    // Arithmetic operators
-    { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, X86::FpADD },  // ADD
-    { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB },  // SUB
-
-    // Bitwise operators
-    { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0 },  // AND
-    { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0 },  // OR
-    { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0 },  // XOR
-  };
-
-  bool isLong = false;
-  if (Class == cLong) {
-    isLong = true;
-    Class = cInt;          // Bottom 32 bits are handled just like ints
-  }
-
-  unsigned Opcode = OpcodeTab[OperatorClass][Class];
-  assert(Opcode && "Floating point arguments to logical inst?");
-  unsigned Op0r = getReg(Op0, MBB, IP);
-  unsigned Op1r = getReg(Op1, MBB, IP);
-  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
-
-  if (isLong) {        // Handle the upper 32 bits of long values...
-    static const unsigned TopTab[] = {
-      X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
-    };
-    BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
-            DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
-  }
+  if (!isa<ConstantInt>(Op1) || Class == cLong) {
+    static const unsigned OpcodeTab[][4] = {
+      // Arithmetic operators
+      { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, X86::FpADD },  // ADD
+      { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB },  // SUB
+
+      // Bitwise operators
+      { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0 },  // AND
+      { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0 },  // OR
+      { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0 },  // XOR
+    };
+
+    bool isLong = false;
+    if (Class == cLong) {
+      isLong = true;
+      Class = cInt;          // Bottom 32 bits are handled just like ints
+    }
+
+    unsigned Opcode = OpcodeTab[OperatorClass][Class];
+    assert(Opcode && "Floating point arguments to logical inst?");
+    unsigned Op0r = getReg(Op0, MBB, IP);
+    unsigned Op1r = getReg(Op1, MBB, IP);
+    BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
+
+    if (isLong) {        // Handle the upper 32 bits of long values...
+      static const unsigned TopTab[] = {
+        X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
+      };
+      BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
+              DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
+    }
+    return;
+  }
+
+  // Special case: op Reg, <const>
+  ConstantInt *Op1C = cast<ConstantInt>(Op1);
+  unsigned Op0r = getReg(Op0, MBB, IP);
+
+  // xor X, -1 -> not X
+  if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
+    static unsigned const NOTTab[] = { X86::NOT8r, X86::NOT16r, X86::NOT32r };
+    BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
+    return;
+  }
+
+  // add X, -1 -> dec X
+  if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
+    static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
+    BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
+    return;
+  }
+
+  // add X, 1 -> inc X
+  if (OperatorClass == 0 && Op1C->equalsInt(1)) {
+    static unsigned const DECTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
+    BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
+    return;
+  }
+
+  static const unsigned OpcodeTab[][3] = {
+    // Arithmetic operators
+    { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri },  // ADD
+    { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri },  // SUB
+
+    // Bitwise operators
+    { X86::AND8ri, X86::AND16ri, X86::AND32ri },  // AND
+    { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri },  // OR
+    { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri },  // XOR
+  };
+
+  assert(Class < cFP && "General code handles 64-bit integer types!");
+  unsigned Opcode = OpcodeTab[OperatorClass][Class];
+  uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();
+
+  // Mask off any upper bits of the constant, if there are any...
+  Op1v &= (1ULL << (8 << Class)) - 1;
+  BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1v);
 }
 
 /// doMultiply - Emit appropriate instructions to multiply together the
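One detail worth calling out in the immediate path above: Op1v &= (1ULL << (8 << Class)) - 1. Class indexes the opcode tables, so 8 << Class is the operand width in bits, and the expression builds an all-ones mask of exactly that width. A small standalone sketch of just that step; the enum values are an assumption based on this file's usual ordering (cByte, cShort, cInt):

#include <cstdint>
#include <cstdio>

// Assumed ordering of the selector's type classes (cByte=0, cShort=1, cInt=2).
enum TypeClass { cByte = 0, cShort = 1, cInt = 2 };

// Reproduces the immediate-masking step: 8 << Class yields 8, 16, or 32,
// and the subtraction produces an all-ones mask of exactly that width.
uint64_t maskImmediate(uint64_t Op1v, TypeClass Class) {
  return Op1v & ((1ULL << (8 << Class)) - 1);
}

int main() {
  printf("%llx\n", (unsigned long long)maskImmediate(~0ULL, cByte));  // ff
  printf("%llx\n", (unsigned long long)maskImmediate(~0ULL, cShort)); // ffff
  printf("%llx\n", (unsigned long long)maskImmediate(~0ULL, cInt));   // ffffffff
}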
@@ -1904,6 +1911,26 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
 }
 
+
+void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
+                             unsigned &IndexReg, unsigned &Disp) {
+  BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
+  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
+    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
+                      BaseReg, Scale, IndexReg, Disp))
+      return;
+  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
+    if (CE->getOpcode() == Instruction::GetElementPtr)
+      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
+                        BaseReg, Scale, IndexReg, Disp))
+        return;
+  }
+
+  // If it's not foldable, reset addr mode.
+  BaseReg = getReg(Addr);
+  Scale = 1; IndexReg = 0; Disp = 0;
+}
+
 
 /// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
 /// instruction.  The load and store instructions are the only place where we
 /// need to worry about the memory layout of the target machine.
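The new helper is a fold-or-fallback contract: either the GEP feeding the address folds into a rich addressing mode, or the caller gets the degenerate mode (pointer register, 1, 0, 0). A standalone sketch of that contract with stand-in types; every name and value here is hypothetical, not the selector's real API:

#include <cstdio>

struct AddrMode { unsigned Base; unsigned Scale; unsigned Index; int Disp; };

// Stand-in for isGEPFoldable: pretend exactly one pointer register has a
// foldable GEP feeding it.
static bool tryFoldGEP(unsigned PtrReg, AddrMode &AM) {
  if (PtrReg == 7) {               // hypothetical foldable case
    AM = AddrMode{3, 4, 5, 8};     // base + 4*index + 8
    return true;
  }
  return false;
}

static AddrMode getAddrMode(unsigned PtrReg) {
  AddrMode AM{0, 1, 0, 0};
  if (tryFoldGEP(PtrReg, AM))
    return AM;                      // folded: richer mode
  return AddrMode{PtrReg, 1, 0, 0}; // not foldable: plain [reg]
}

int main() {
  AddrMode A = getAddrMode(7), B = getAddrMode(9);
  printf("folded:   base=%u scale=%u index=%u disp=%d\n", A.Base, A.Scale, A.Index, A.Disp);
  printf("fallback: base=%u scale=%u index=%u disp=%d\n", B.Base, B.Scale, B.Index, B.Disp);
}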
@@ -1911,23 +1938,7 @@ void ISel::emitShiftOperation(MachineBasicBlock *MBB,
 void ISel::visitLoadInst(LoadInst &I) {
   unsigned DestReg = getReg(I);
-  unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
-  Value *Addr = I.getOperand(0);
-  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
-    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
-                      BaseReg, Scale, IndexReg, Disp))
-      Addr = 0;   // Address is consumed!
-  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
-    if (CE->getOpcode() == Instruction::GetElementPtr)
-      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
-                        BaseReg, Scale, IndexReg, Disp))
-        Addr = 0;
-  }
-
-  if (Addr) {
-    // If it's not foldable, reset addr mode.
-    BaseReg = getReg(Addr);
-    Scale = 1; IndexReg = 0; Disp = 0;
-  }
+  unsigned BaseReg, Scale, IndexReg, Disp;
+  getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
 
   unsigned Class = getClassB(I.getType());
   if (Class == cLong) {
@@ -1951,24 +1962,8 @@ void ISel::visitLoadInst(LoadInst &I) {
 /// instruction.
 ///
 void ISel::visitStoreInst(StoreInst &I) {
-  unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
-  Value *Addr = I.getOperand(1);
-  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
-    if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
-                      BaseReg, Scale, IndexReg, Disp))
-      Addr = 0;   // Address is consumed!
-  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
-    if (CE->getOpcode() == Instruction::GetElementPtr)
-      if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
-                        BaseReg, Scale, IndexReg, Disp))
-        Addr = 0;
-  }
-
-  if (Addr) {
-    // If it's not foldable, reset addr mode.
-    BaseReg = getReg(Addr);
-    Scale = 1; IndexReg = 0; Disp = 0;
-  }
+  unsigned BaseReg, Scale, IndexReg, Disp;
+  getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);
 
   const Type *ValTy = I.getOperand(0)->getType();
   unsigned Class = getClassB(ValTy);
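As a worked example of what a successful fold buys (hypothetical values, not taken from this commit): for an array of two-int structs, the address of P[i].b is exactly representable as Base + Scale*Index + Disp with Scale = sizeof(Pair) and Disp = offsetof(Pair, b), so the load or store needs no separate address arithmetic:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Pair { int32_t a, b; };

int main() {
  Pair P[4] = {};
  unsigned Scale = sizeof(Pair);       // 8: the element stride (encodable on x86)
  unsigned Disp  = offsetof(Pair, b);  // 4: the field offset
  for (unsigned i = 0; i != 4; ++i) {
    uintptr_t direct = (uintptr_t)&P[i].b;
    uintptr_t folded = (uintptr_t)P + Scale * i + Disp;  // Base + Scale*Index + Disp
    printf("i=%u %s\n", i, direct == folded ? "match" : "mismatch");
  }
}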