Mirror of https://github.com/c64scene-ar/llvm-6502.git (synced 2024-12-15 04:30:12 +00:00)
Cleaned up code layout, spacing, etc. for readability purposes and to be more
consistent with the style of LLVM's code base (and itself! it's inconsistent in some places.)
No functional changes were made.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@6265 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: 5a7a849403
Commit: 81b0686f09
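
The change is purely a brace and indentation reflow. As an illustrative before/after sketch of the pattern applied throughout the hunks below (not a literal excerpt from the diff; the names are made up):

// Old layout:
if (Cond)
  {
    doWork();
  }
else
  {
    doOther();
  }

// New LLVM-style layout:
if (Cond) {
  doWork();
} else {
  doOther();
}
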
@@ -97,31 +97,28 @@ CreateSETUWConst(const TargetMachine& target, uint32_t C,
bool smallNegValue =isSigned && sC < 0 && sC != -sC && -sC < (int32_t)MAXSIMM;

// Set the high 22 bits in dest if non-zero and simm13 field of OR not enough
if (!smallNegValue && (C & ~MAXLO) && C > MAXSIMM)
{
miSETHI = BuildMI(V9::SETHI, 2).addZImm(C).addRegDef(dest);
miSETHI->setOperandHi32(0);
mvec.push_back(miSETHI);
}
if (!smallNegValue && (C & ~MAXLO) && C > MAXSIMM) {
miSETHI = BuildMI(V9::SETHI, 2).addZImm(C).addRegDef(dest);
miSETHI->setOperandHi32(0);
mvec.push_back(miSETHI);
}

// Set the low 10 or 12 bits in dest. This is necessary if no SETHI
// was generated, or if the low 10 bits are non-zero.
if (miSETHI==NULL || C & MAXLO)
{
if (miSETHI)
{ // unsigned value with high-order bits set using SETHI
miOR = BuildMI(V9::OR,3).addReg(dest).addZImm(C).addRegDef(dest);
miOR->setOperandLo32(1);
}
else
{ // unsigned or small signed value that fits in simm13 field of OR
assert(smallNegValue || (C & ~MAXSIMM) == 0);
miOR = BuildMI(V9::OR, 3).addMReg(target.getRegInfo()
.getZeroRegNum())
.addSImm(sC).addRegDef(dest);
}
mvec.push_back(miOR);
if (miSETHI==NULL || C & MAXLO) {
if (miSETHI) {
// unsigned value with high-order bits set using SETHI
miOR = BuildMI(V9::OR,3).addReg(dest).addZImm(C).addRegDef(dest);
miOR->setOperandLo32(1);
} else {
// unsigned or small signed value that fits in simm13 field of OR
assert(smallNegValue || (C & ~MAXSIMM) == 0);
miOR = BuildMI(V9::OR, 3).addMReg(target.getRegInfo()
.getZeroRegNum())
.addSImm(sC).addRegDef(dest);
}
mvec.push_back(miOR);
}

assert((miSETHI || miOR) && "Oops, no code was generated!");
}

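The hunk above is the standard SPARC idiom for materializing a 32-bit constant: SETHI writes the upper 22 bits (bits 31..10) of the register and clears the low 10 bits, and a following OR with a small immediate fills in the rest; values that already fit in the 13-bit signed immediate skip the SETHI entirely. A minimal sketch of the split, assuming the usual %hi/%lo semantics (the helper names below are made up for illustration; MAXLO/MAXSIMM are the backend's names for the 10-bit and 13-bit limits):

#include <cstdint>
#include <cstdio>

// hi22() models the imm22 field written by SETHI (bits 31..10);
// lo10() models the low bits supplied by the following OR.
static uint32_t hi22(uint32_t C) { return C >> 10; }
static uint32_t lo10(uint32_t C) { return C & 0x3FF; }

int main() {
  uint32_t C = 0x12345678;
  uint32_t rebuilt = (hi22(C) << 10) | lo10(C);   // SETHI, then OR
  std::printf("imm22=0x%x lo10=0x%x rebuilt=0x%x\n", hi22(C), lo10(C), rebuilt);
  return rebuilt == C ? 0 : 1;
}
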
@@ -266,17 +263,16 @@ CreateUIntSetInstruction(const TargetMachine& target,
static const uint64_t lo32 = (uint32_t) ~0;
if (C <= lo32) // High 32 bits are 0. Set low 32 bits.
CreateSETUWConst(target, (uint32_t) C, dest, mvec);
else if ((C & ~lo32) == ~lo32 && (C & (1 << 31)))
{ // All high 33 (not 32) bits are 1s: sign-extension will take care
// of high 32 bits, so use the sequence for signed int
CreateSETSWConst(target, (int32_t) C, dest, mvec);
}
else if (C > lo32)
{ // C does not fit in 32 bits
TmpInstruction* tmpReg = new TmpInstruction(Type::IntTy);
mcfi.addTemp(tmpReg);
CreateSETXConst(target, C, tmpReg, dest, mvec);
}
else if ((C & ~lo32) == ~lo32 && (C & (1 << 31))) {
// All high 33 (not 32) bits are 1s: sign-extension will take care
// of high 32 bits, so use the sequence for signed int
CreateSETSWConst(target, (int32_t) C, dest, mvec);
} else if (C > lo32) {
// C does not fit in 32 bits
TmpInstruction* tmpReg = new TmpInstruction(Type::IntTy);
mcfi.addTemp(tmpReg);
CreateSETXConst(target, C, tmpReg, dest, mvec);
}
}

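The three branches above pick the cheapest sequence for a 64-bit constant: SETUW when the upper 32 bits are zero, SETSW when the upper 33 bits are all ones (32-bit sign extension reproduces them for free), and the full SETX sequence otherwise. A small standalone sketch of the same classification (enum and function names are illustrative only):

#include <cstdint>

enum class SetSeq { SETUW, SETSW, SETX };

// Mirrors the tests in CreateUIntSetInstruction above; sketch only.
SetSeq classifyConstant(uint64_t C) {
  const uint64_t lo32 = (uint32_t)~0;              // 0x00000000FFFFFFFF
  if (C <= lo32)
    return SetSeq::SETUW;                          // high 32 bits already zero
  if ((C & ~lo32) == ~lo32 && (C & (1ULL << 31)))
    return SetSeq::SETSW;                          // high 33 bits all ones
  return SetSeq::SETX;                             // needs the full 64-bit sequence
}
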
@@ -425,80 +421,72 @@ UltraSparcInstrInfo::CreateCodeToLoadConst(const TargetMachine& target,
if (isa<ConstantPointerRef>(val))
val = cast<ConstantPointerRef>(val)->getValue();

if (isa<GlobalValue>(val))
{
if (isa<GlobalValue>(val)) {
TmpInstruction* tmpReg =
new TmpInstruction(PointerType::get(val->getType()), val);
mcfi.addTemp(tmpReg);
CreateSETXLabel(target, val, tmpReg, dest, mvec);
} else if (valType->isIntegral()) {
bool isValidConstant;
unsigned opSize = target.getTargetData().getTypeSize(val->getType());
unsigned destSize = target.getTargetData().getTypeSize(dest->getType());

if (! dest->getType()->isSigned()) {
uint64_t C = GetConstantValueAsUnsignedInt(val, isValidConstant);
assert(isValidConstant && "Unrecognized constant");

if (opSize > destSize || (val->getType()->isSigned() && destSize < 8)) {
// operand is larger than dest,
// OR both are equal but smaller than the full register size
// AND operand is signed, so it may have extra sign bits:
// mask high bits
C = C & ((1U << 8*destSize) - 1);
}
CreateUIntSetInstruction(target, C, dest, mvec, mcfi);
} else {
int64_t C = GetConstantValueAsSignedInt(val, isValidConstant);
assert(isValidConstant && "Unrecognized constant");

if (opSize > destSize)
// operand is larger than dest: mask high bits
C = C & ((1U << 8*destSize) - 1);

if (opSize > destSize ||
(opSize == destSize && !val->getType()->isSigned()))
// sign-extend from destSize to 64 bits
C = ((C & (1U << (8*destSize - 1)))
? C | ~((1U << 8*destSize) - 1)
: C);

CreateIntSetInstruction(target, C, dest, mvec, mcfi);
}
else if (valType->isIntegral())
{
bool isValidConstant;
unsigned opSize = target.getTargetData().getTypeSize(val->getType());
unsigned destSize = target.getTargetData().getTypeSize(dest->getType());
} else {
// Make an instruction sequence to load the constant, viz:
// SETX <addr-of-constant>, tmpReg, addrReg
// LOAD /*addr*/ addrReg, /*offset*/ 0, dest

if (! dest->getType()->isSigned())
{
uint64_t C = GetConstantValueAsUnsignedInt(val, isValidConstant);
assert(isValidConstant && "Unrecognized constant");
// First, create a tmp register to be used by the SETX sequence.
TmpInstruction* tmpReg =
new TmpInstruction(PointerType::get(val->getType()), val);
mcfi.addTemp(tmpReg);

if (opSize > destSize || (val->getType()->isSigned() && destSize < 8))
{ // operand is larger than dest,
// OR both are equal but smaller than the full register size
// AND operand is signed, so it may have extra sign bits:
// mask high bits
C = C & ((1U << 8*destSize) - 1);
}
CreateUIntSetInstruction(target, C, dest, mvec, mcfi);
}
else
{
int64_t C = GetConstantValueAsSignedInt(val, isValidConstant);
assert(isValidConstant && "Unrecognized constant");
// Create another TmpInstruction for the address register
TmpInstruction* addrReg =
new TmpInstruction(PointerType::get(val->getType()), val);
mcfi.addTemp(addrReg);

if (opSize > destSize)
// operand is larger than dest: mask high bits
C = C & ((1U << 8*destSize) - 1);
// Put the address (a symbolic name) into a register
CreateSETXLabel(target, val, tmpReg, addrReg, mvec);

if (opSize > destSize ||
(opSize == destSize && !val->getType()->isSigned()))
// sign-extend from destSize to 64 bits
C = ((C & (1U << (8*destSize - 1)))
? C | ~((1U << 8*destSize) - 1)
: C);
// Generate the load instruction
int64_t zeroOffset = 0; // to avoid ambiguity with (Value*) 0
unsigned Opcode = ChooseLoadInstruction(val->getType());
mvec.push_back(BuildMI(Opcode, 3).addReg(addrReg).
addSImm(zeroOffset).addRegDef(dest));

CreateIntSetInstruction(target, C, dest, mvec, mcfi);
}
}
else
{
// Make an instruction sequence to load the constant, viz:
// SETX <addr-of-constant>, tmpReg, addrReg
// LOAD /*addr*/ addrReg, /*offset*/ 0, dest

// First, create a tmp register to be used by the SETX sequence.
TmpInstruction* tmpReg =
new TmpInstruction(PointerType::get(val->getType()), val);
mcfi.addTemp(tmpReg);

// Create another TmpInstruction for the address register
TmpInstruction* addrReg =
new TmpInstruction(PointerType::get(val->getType()), val);
mcfi.addTemp(addrReg);

// Put the address (a symbolic name) into a register
CreateSETXLabel(target, val, tmpReg, addrReg, mvec);

// Generate the load instruction
int64_t zeroOffset = 0; // to avoid ambiguity with (Value*) 0
unsigned Opcode = ChooseLoadInstruction(val->getType());
mvec.push_back(BuildMI(Opcode, 3).addReg(addrReg).
addSImm(zeroOffset).addRegDef(dest));

// Make sure constant is emitted to constant pool in assembly code.
MachineFunction::get(F).getInfo()->addToConstantPool(cast<Constant>(val));
}
// Make sure constant is emitted to constant pool in assembly code.
MachineFunction::get(F).getInfo()->addToConstantPool(cast<Constant>(val));
}
}

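Buried in this hunk is the truncate-then-re-extend arithmetic applied to the constant: mask it down to the destination's byte size and, for signed destinations, propagate the resulting sign bit back out to 64 bits. A self-contained sketch of the same two steps, written with 64-bit shifts for clarity (the function name is illustrative):

#include <cstdint>

// Truncate C to `destSize` bytes, then sign-extend the result to 64 bits.
int64_t truncateAndSignExtend(int64_t C, unsigned destSize) {
  uint64_t mask = (destSize >= 8) ? ~0ULL : ((1ULL << (8 * destSize)) - 1);
  uint64_t truncated = (uint64_t)C & mask;                 // mask high bits
  if (destSize < 8 && (truncated & (1ULL << (8 * destSize - 1))))
    truncated |= ~mask;                                    // replicate the sign bit
  return (int64_t)truncated;
}
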
@@ -535,16 +523,16 @@ UltraSparcInstrInfo::CreateCodeToCopyIntToFloat(const TargetMachine& target,
// Note that the store instruction is the same for signed and unsigned ints.
const Type* storeType = (srcSize <= 4)? Type::IntTy : Type::LongTy;
Value* storeVal = val;
if (srcSize < target.getTargetData().getTypeSize(Type::FloatTy))
{ // sign- or zero-extend respectively
storeVal = new TmpInstruction(storeType, val);
if (val->getType()->isSigned())
CreateSignExtensionInstructions(target, F, val, storeVal, 8*srcSize,
mvec, mcfi);
else
CreateZeroExtensionInstructions(target, F, val, storeVal, 8*srcSize,
mvec, mcfi);
}
if (srcSize < target.getTargetData().getTypeSize(Type::FloatTy)) {
// sign- or zero-extend respectively
storeVal = new TmpInstruction(storeType, val);
if (val->getType()->isSigned())
CreateSignExtensionInstructions(target, F, val, storeVal, 8*srcSize,
mvec, mcfi);
else
CreateZeroExtensionInstructions(target, F, val, storeVal, 8*srcSize,
mvec, mcfi);
}

unsigned FPReg = target.getRegInfo().getFramePointer();
mvec.push_back(BuildMI(ChooseStoreInstruction(storeType), 3)

@@ -622,8 +610,7 @@ UltraSparcInstrInfo::CreateCopyInstructionsByType(const TargetMachine& target,
const Type* resultType = dest->getType();

MachineOpCode opCode = ChooseAddInstructionByType(resultType);
if (opCode == V9::INVALID_OPCODE)
{
if (opCode == V9::INVALID_OPCODE) {
assert(0 && "Unsupported result type in CreateCopyInstructionsByType()");
return;
}

@@ -632,8 +619,7 @@ UltraSparcInstrInfo::CreateCopyInstructionsByType(const TargetMachine& target,
// a global variable (i.e., a constant address), generate a load
// instruction instead of an add
//
if (isa<Constant>(src))
{
if (isa<Constant>(src)) {
unsigned int machineRegNum;
int64_t immedValue;
MachineOperand::MachineOperandType opType =

@@ -646,14 +632,13 @@ UltraSparcInstrInfo::CreateCopyInstructionsByType(const TargetMachine& target,
else if (isa<GlobalValue>(src))
loadConstantToReg = true;

if (loadConstantToReg)
{ // `src' is constant and cannot fit in immed field for the ADD
if (loadConstantToReg) {
// `src' is constant and cannot fit in immed field for the ADD
// Insert instructions to "load" the constant into a register
target.getInstrInfo().CreateCodeToLoadConst(target, F, src, dest,
mvec, mcfi);
}
else
{ // Create an add-with-0 instruction of the appropriate type.
} else {
// Create an add-with-0 instruction of the appropriate type.
// Make `src' the second operand, in case it is a constant
// Use (unsigned long) 0 for a NULL pointer value.
//

@@ -682,8 +667,8 @@ CreateBitExtensionInstructions(bool signExtend,

assert(numLowBits <= 32 && "Otherwise, nothing should be done here!");

if (numLowBits < 32)
{ // SLL is needed since operand size is < 32 bits.
if (numLowBits < 32) {
// SLL is needed since operand size is < 32 bits.
TmpInstruction *tmpI = new TmpInstruction(destVal->getType(),
srcVal, destVal, "make32");
mcfi.addTemp(tmpI);

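The hunk is cut off just after the temporary is created, but the comment spells out the usual idiom: shift the operand left so its numLowBits land at the top of a 32-bit register ("make32"), then shift right arithmetically (sign extension) or logically (zero extension) by the same amount. A sketch of the equivalent arithmetic, assuming that is how the emitted SLL/SRA-or-SRL pair completes (the helper name is hypothetical):

#include <cstdint>

// Extend the low `numLowBits` of v (1 <= numLowBits <= 32) to a full 32-bit value.
uint32_t extendLowBits(uint32_t v, unsigned numLowBits, bool signExtend) {
  unsigned shift = 32 - numLowBits;                      // SLL amount ("make32")
  uint32_t up = v << shift;
  return signExtend ? (uint32_t)((int32_t)up >> shift)   // SRA: sign-extend
                    : (up >> shift);                     // SRL: zero-extend
}
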
@@ -97,50 +97,49 @@ FoldGetElemChain(InstrTreeNode* ptrNode, std::vector<Value*>& chainIdxVec,
InstructionNode* ptrChild = gepNode;
while (ptrChild && (ptrChild->getOpLabel() == Instruction::GetElementPtr ||
ptrChild->getOpLabel() == GetElemPtrIdx))
{
// Child is a GetElemPtr instruction
gepInst = cast<GetElementPtrInst>(ptrChild->getValue());
User::op_iterator OI, firstIdx = gepInst->idx_begin();
User::op_iterator lastIdx = gepInst->idx_end();
bool allConstantOffsets = true;
{
// Child is a GetElemPtr instruction
gepInst = cast<GetElementPtrInst>(ptrChild->getValue());
User::op_iterator OI, firstIdx = gepInst->idx_begin();
User::op_iterator lastIdx = gepInst->idx_end();
bool allConstantOffsets = true;

// The first index of every GEP must be an array index.
assert((*firstIdx)->getType() == Type::LongTy &&
"INTERNAL ERROR: Structure index for a pointer type!");
// The first index of every GEP must be an array index.
assert((*firstIdx)->getType() == Type::LongTy &&
"INTERNAL ERROR: Structure index for a pointer type!");

// If the last instruction had a leading non-zero index, check if the
// current one references a sequential (i.e., indexable) type.
// If not, the code is not type-safe and we would create an illegal GEP
// by folding them, so don't fold any more instructions.
//
if (lastInstHasLeadingNonZero)
if (! isa<SequentialType>(gepInst->getType()->getElementType()))
break; // cannot fold in any preceding getElementPtr instrs.
// If the last instruction had a leading non-zero index, check if the
// current one references a sequential (i.e., indexable) type.
// If not, the code is not type-safe and we would create an illegal GEP
// by folding them, so don't fold any more instructions.
//
if (lastInstHasLeadingNonZero)
if (! isa<SequentialType>(gepInst->getType()->getElementType()))
break; // cannot fold in any preceding getElementPtr instrs.

// Check that all offsets are constant for this instruction
for (OI = firstIdx; allConstantOffsets && OI != lastIdx; ++OI)
allConstantOffsets = isa<ConstantInt>(*OI);
// Check that all offsets are constant for this instruction
for (OI = firstIdx; allConstantOffsets && OI != lastIdx; ++OI)
allConstantOffsets = isa<ConstantInt>(*OI);

if (allConstantOffsets)
{ // Get pointer value out of ptrChild.
ptrVal = gepInst->getPointerOperand();
if (allConstantOffsets) {
// Get pointer value out of ptrChild.
ptrVal = gepInst->getPointerOperand();

// Remember if it has leading zero index: it will be discarded later.
lastInstHasLeadingNonZero = ! IsZero(*firstIdx);
// Remember if it has leading zero index: it will be discarded later.
lastInstHasLeadingNonZero = ! IsZero(*firstIdx);

// Insert its index vector at the start, skipping any leading [0]
chainIdxVec.insert(chainIdxVec.begin(),
firstIdx + !lastInstHasLeadingNonZero, lastIdx);
// Insert its index vector at the start, skipping any leading [0]
chainIdxVec.insert(chainIdxVec.begin(),
firstIdx + !lastInstHasLeadingNonZero, lastIdx);

// Mark the folded node so no code is generated for it.
((InstructionNode*) ptrChild)->markFoldedIntoParent();
// Mark the folded node so no code is generated for it.
((InstructionNode*) ptrChild)->markFoldedIntoParent();

// Get the previous GEP instruction and continue trying to fold
ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
}
else // cannot fold this getElementPtr instr. or any preceding ones
break;
}
// Get the previous GEP instruction and continue trying to fold
ptrChild = dyn_cast<InstructionNode>(ptrChild->leftChild());
} else // cannot fold this getElementPtr instr. or any preceding ones
break;
}

// If the first getElementPtr instruction had a leading [0], add it back.
// Note that this instruction is the *last* one successfully folded above.

@@ -186,11 +185,10 @@ GetGEPInstArgs(InstructionNode* gepNode,
bool foldedGEPs = false;
bool leadingNonZeroIdx = gepI && ! IsZero(*gepI->idx_begin());
if (allConstantIndices)
if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec, leadingNonZeroIdx))
{
ptrVal = newPtr;
foldedGEPs = true;
}
if (Value* newPtr = FoldGetElemChain(ptrChild, idxVec, leadingNonZeroIdx)) {
ptrVal = newPtr;
foldedGEPs = true;
}

// Append the index vector of the current instruction.
// Skip the leading [0] index if preceding GEPs were folded into this.

@@ -242,12 +240,12 @@ GetMemInstArgs(InstructionNode* memInstrNode,
InstructionNode* gepNode = NULL;
if (isa<GetElementPtrInst>(memInst))
gepNode = memInstrNode;
else if (isa<InstructionNode>(ptrChild) && isa<GetElementPtrInst>(ptrVal))
{ // Child of load/store is a GEP and memInst is its only use.
// Use its indices and mark it as folded.
gepNode = cast<InstructionNode>(ptrChild);
gepNode->markFoldedIntoParent();
}
else if (isa<InstructionNode>(ptrChild) && isa<GetElementPtrInst>(ptrVal)) {
// Child of load/store is a GEP and memInst is its only use.
// Use its indices and mark it as folded.
gepNode = cast<InstructionNode>(ptrChild);
gepNode->markFoldedIntoParent();
}

// If there are no indices, return the current pointer.
// Else extract the pointer from the GEP and fold the indices.

@@ -268,18 +266,18 @@ ChooseBprInstruction(const InstructionNode* instrNode)
((InstructionNode*) instrNode->leftChild())->getInstruction();

switch(setCCInstr->getOpcode())
{
case Instruction::SetEQ: opCode = V9::BRZ; break;
case Instruction::SetNE: opCode = V9::BRNZ; break;
case Instruction::SetLE: opCode = V9::BRLEZ; break;
case Instruction::SetGE: opCode = V9::BRGEZ; break;
case Instruction::SetLT: opCode = V9::BRLZ; break;
case Instruction::SetGT: opCode = V9::BRGZ; break;
default:
assert(0 && "Unrecognized VM instruction!");
opCode = V9::INVALID_OPCODE;
break;
}
{
case Instruction::SetEQ: opCode = V9::BRZ; break;
case Instruction::SetNE: opCode = V9::BRNZ; break;
case Instruction::SetLE: opCode = V9::BRLEZ; break;
case Instruction::SetGE: opCode = V9::BRGEZ; break;
case Instruction::SetLT: opCode = V9::BRLZ; break;
case Instruction::SetGT: opCode = V9::BRGZ; break;
default:
assert(0 && "Unrecognized VM instruction!");
opCode = V9::INVALID_OPCODE;
break;
}

return opCode;
}

@@ -293,36 +291,33 @@ ChooseBpccInstruction(const InstructionNode* instrNode,

bool isSigned = setCCInstr->getOperand(0)->getType()->isSigned();

if (isSigned)
if (isSigned) {
switch(setCCInstr->getOpcode())
{
switch(setCCInstr->getOpcode())
{
case Instruction::SetEQ: opCode = V9::BE; break;
case Instruction::SetNE: opCode = V9::BNE; break;
case Instruction::SetLE: opCode = V9::BLE; break;
case Instruction::SetGE: opCode = V9::BGE; break;
case Instruction::SetLT: opCode = V9::BL; break;
case Instruction::SetGT: opCode = V9::BG; break;
default:
assert(0 && "Unrecognized VM instruction!");
break;
}
case Instruction::SetEQ: opCode = V9::BE; break;
case Instruction::SetNE: opCode = V9::BNE; break;
case Instruction::SetLE: opCode = V9::BLE; break;
case Instruction::SetGE: opCode = V9::BGE; break;
case Instruction::SetLT: opCode = V9::BL; break;
case Instruction::SetGT: opCode = V9::BG; break;
default:
assert(0 && "Unrecognized VM instruction!");
break;
}
else
} else {
switch(setCCInstr->getOpcode())
{
switch(setCCInstr->getOpcode())
{
case Instruction::SetEQ: opCode = V9::BE; break;
case Instruction::SetNE: opCode = V9::BNE; break;
case Instruction::SetLE: opCode = V9::BLEU; break;
case Instruction::SetGE: opCode = V9::BCC; break;
case Instruction::SetLT: opCode = V9::BCS; break;
case Instruction::SetGT: opCode = V9::BGU; break;
default:
assert(0 && "Unrecognized VM instruction!");
break;
}
case Instruction::SetEQ: opCode = V9::BE; break;
case Instruction::SetNE: opCode = V9::BNE; break;
case Instruction::SetLE: opCode = V9::BLEU; break;
case Instruction::SetGE: opCode = V9::BCC; break;
case Instruction::SetLT: opCode = V9::BCS; break;
case Instruction::SetGT: opCode = V9::BGU; break;
default:
assert(0 && "Unrecognized VM instruction!");
break;
}
}

return opCode;
}

@@ -334,17 +329,17 @@ ChooseBFpccInstruction(const InstructionNode* instrNode,
MachineOpCode opCode = V9::INVALID_OPCODE;

switch(setCCInstr->getOpcode())
{
case Instruction::SetEQ: opCode = V9::FBE; break;
case Instruction::SetNE: opCode = V9::FBNE; break;
case Instruction::SetLE: opCode = V9::FBLE; break;
case Instruction::SetGE: opCode = V9::FBGE; break;
case Instruction::SetLT: opCode = V9::FBL; break;
case Instruction::SetGT: opCode = V9::FBG; break;
default:
assert(0 && "Unrecognized VM instruction!");
break;
}
{
case Instruction::SetEQ: opCode = V9::FBE; break;
case Instruction::SetNE: opCode = V9::FBNE; break;
case Instruction::SetLE: opCode = V9::FBLE; break;
case Instruction::SetGE: opCode = V9::FBGE; break;
case Instruction::SetLT: opCode = V9::FBL; break;
case Instruction::SetGT: opCode = V9::FBG; break;
default:
assert(0 && "Unrecognized VM instruction!");
break;
}

return opCode;
}

@@ -367,11 +362,10 @@ GetTmpForCC(Value* boolVal, const Function *F, const Type* ccType)

assert(boolVal->getType() == Type::BoolTy && "Weird but ok! Delete assert");

if (lastFunction != F)
{
lastFunction = F;
boolToTmpCache.clear();
}
if (lastFunction != F) {
lastFunction = F;
boolToTmpCache.clear();
}

// Look for tmpI and create a new one otherwise. The new value is
// directly written to map using the ref returned by operator[].

@@ -407,17 +401,17 @@ ChooseMovFpccInstruction(const InstructionNode* instrNode)
MachineOpCode opCode = V9::INVALID_OPCODE;

switch(instrNode->getInstruction()->getOpcode())
{
case Instruction::SetEQ: opCode = V9::MOVFE; break;
case Instruction::SetNE: opCode = V9::MOVFNE; break;
case Instruction::SetLE: opCode = V9::MOVFLE; break;
case Instruction::SetGE: opCode = V9::MOVFGE; break;
case Instruction::SetLT: opCode = V9::MOVFL; break;
case Instruction::SetGT: opCode = V9::MOVFG; break;
default:
assert(0 && "Unrecognized VM instruction!");
break;
}
{
case Instruction::SetEQ: opCode = V9::MOVFE; break;
case Instruction::SetNE: opCode = V9::MOVFNE; break;
case Instruction::SetLE: opCode = V9::MOVFLE; break;
case Instruction::SetGE: opCode = V9::MOVFGE; break;
case Instruction::SetLT: opCode = V9::MOVFL; break;
case Instruction::SetGT: opCode = V9::MOVFG; break;
default:
assert(0 && "Unrecognized VM instruction!");
break;
}

return opCode;
}

@@ -441,15 +435,15 @@ ChooseMovpccAfterSub(const InstructionNode* instrNode,
valueToMove = 1;

switch(instrNode->getInstruction()->getOpcode())
{
case Instruction::SetEQ: opCode = V9::MOVE; break;
case Instruction::SetLE: opCode = V9::MOVLE; break;
case Instruction::SetGE: opCode = V9::MOVGE; break;
case Instruction::SetLT: opCode = V9::MOVL; break;
case Instruction::SetGT: opCode = V9::MOVG; break;
case Instruction::SetNE: assert(0 && "No move required!"); break;
default: assert(0 && "Unrecognized VM instr!"); break;
}
{
case Instruction::SetEQ: opCode = V9::MOVE; break;
case Instruction::SetLE: opCode = V9::MOVLE; break;
case Instruction::SetGE: opCode = V9::MOVGE; break;
case Instruction::SetLT: opCode = V9::MOVL; break;
case Instruction::SetGT: opCode = V9::MOVG; break;
case Instruction::SetNE: assert(0 && "No move required!"); break;
default: assert(0 && "Unrecognized VM instr!"); break;
}

return opCode;
}

@@ -460,41 +454,42 @@ ChooseConvertToFloatInstr(OpLabel vopCode, const Type* opType)
MachineOpCode opCode = V9::INVALID_OPCODE;

switch(vopCode)
{
case ToFloatTy:
if (opType == Type::SByteTy || opType == Type::ShortTy || opType == Type::IntTy)
opCode = V9::FITOS;
else if (opType == Type::LongTy)
opCode = V9::FXTOS;
else if (opType == Type::DoubleTy)
opCode = V9::FDTOS;
else if (opType == Type::FloatTy)
;
else
assert(0 && "Cannot convert this type to FLOAT on SPARC");
break;
{
case ToFloatTy:
if (opType == Type::SByteTy || opType == Type::ShortTy ||
opType == Type::IntTy)
opCode = V9::FITOS;
else if (opType == Type::LongTy)
opCode = V9::FXTOS;
else if (opType == Type::DoubleTy)
opCode = V9::FDTOS;
else if (opType == Type::FloatTy)
;
else
assert(0 && "Cannot convert this type to FLOAT on SPARC");
break;

case ToDoubleTy:
// This is usually used in conjunction with CreateCodeToCopyIntToFloat().
// Both functions should treat the integer as a 32-bit value for types
// of 4 bytes or less, and as a 64-bit value otherwise.
if (opType == Type::SByteTy || opType == Type::UByteTy ||
opType == Type::ShortTy || opType == Type::UShortTy ||
opType == Type::IntTy || opType == Type::UIntTy)
opCode = V9::FITOD;
else if (opType == Type::LongTy || opType == Type::ULongTy)
opCode = V9::FXTOD;
else if (opType == Type::FloatTy)
opCode = V9::FSTOD;
else if (opType == Type::DoubleTy)
;
else
assert(0 && "Cannot convert this type to DOUBLE on SPARC");
break;
case ToDoubleTy:
// This is usually used in conjunction with CreateCodeToCopyIntToFloat().
// Both functions should treat the integer as a 32-bit value for types
// of 4 bytes or less, and as a 64-bit value otherwise.
if (opType == Type::SByteTy || opType == Type::UByteTy ||
opType == Type::ShortTy || opType == Type::UShortTy ||
opType == Type::IntTy || opType == Type::UIntTy)
opCode = V9::FITOD;
else if (opType == Type::LongTy || opType == Type::ULongTy)
opCode = V9::FXTOD;
else if (opType == Type::FloatTy)
opCode = V9::FSTOD;
else if (opType == Type::DoubleTy)
;
else
assert(0 && "Cannot convert this type to DOUBLE on SPARC");
break;

default:
break;
}
default:
break;
}

return opCode;
}

@@ -507,22 +502,17 @@ ChooseConvertFPToIntInstr(Type::PrimitiveID tid, const Type* opType)
assert((opType == Type::FloatTy || opType == Type::DoubleTy)
&& "This function should only be called for FLOAT or DOUBLE");

if (tid==Type::UIntTyID)
{
assert(tid != Type::UIntTyID && "FP-to-uint conversions must be expanded"
" into FP->long->uint for SPARC v9: SO RUN PRESELECTION PASS!");
}
else if (tid==Type::SByteTyID || tid==Type::ShortTyID || tid==Type::IntTyID ||
tid==Type::UByteTyID || tid==Type::UShortTyID)
{
opCode = (opType == Type::FloatTy)? V9::FSTOI : V9::FDTOI;
}
else if (tid==Type::LongTyID || tid==Type::ULongTyID)
{
if (tid == Type::UIntTyID) {
assert(tid != Type::UIntTyID && "FP-to-uint conversions must be expanded"
" into FP->long->uint for SPARC v9: SO RUN PRESELECTION PASS!");
} else if (tid == Type::SByteTyID || tid == Type::ShortTyID ||
tid == Type::IntTyID || tid == Type::UByteTyID ||
tid == Type::UShortTyID) {
opCode = (opType == Type::FloatTy)? V9::FSTOI : V9::FDTOI;
} else if (tid == Type::LongTyID || tid == Type::ULongTyID) {
opCode = (opType == Type::FloatTy)? V9::FSTOX : V9::FDTOX;
}
else
assert(0 && "Should not get here, Mo!");
} else
assert(0 && "Should not get here, Mo!");

return opCode;
}

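The first assert in this hunk documents a V9 quirk: there is no single FP-to-unsigned-int conversion, so the preselection pass is expected to rewrite such casts as FP to signed 64-bit (FSTOX/FDTOX) followed by truncation to 32 bits. In C terms the expected expansion looks roughly like this (illustrative only; it assumes the value is in range for the signed 64-bit intermediate):

#include <cstdint>

// Illustrative expansion of (uint32_t)d as done for SPARC V9.
uint32_t fpToUInt(double d) {
  int64_t wide = (int64_t)d;     // FDTOX: FP -> signed 64-bit integer
  return (uint32_t)wide;         // keep the low 32 bits as the unsigned result
}
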
@@ -611,11 +601,11 @@ CreateAddConstInstruction(const InstructionNode* instrNode)
// instead of an FADD (1 vs 3 cycles). There is no integer MOV.
//
if (ConstantFP *FPC = dyn_cast<ConstantFP>(constOp)) {
double dval = FPC->getValue();
if (dval == 0.0)
minstr = CreateMovFloatInstruction(instrNode,
instrNode->getInstruction()->getType());
}
double dval = FPC->getValue();
if (dval == 0.0)
minstr = CreateMovFloatInstruction(instrNode,
instrNode->getInstruction()->getType());
}

return minstr;
}

@@ -626,17 +616,16 @@ ChooseSubInstructionByType(const Type* resultType)
{
MachineOpCode opCode = V9::INVALID_OPCODE;

if (resultType->isInteger() || isa<PointerType>(resultType))
{
if (resultType->isInteger() || isa<PointerType>(resultType)) {
opCode = V9::SUB;
}
else
} else {
switch(resultType->getPrimitiveID())
{
case Type::FloatTyID: opCode = V9::FSUBS; break;
case Type::DoubleTyID: opCode = V9::FSUBD; break;
default: assert(0 && "Invalid type for SUB instruction"); break;
}
{
case Type::FloatTyID: opCode = V9::FSUBS; break;
case Type::DoubleTyID: opCode = V9::FSUBD; break;
default: assert(0 && "Invalid type for SUB instruction"); break;
}
}

return opCode;
}