diff --git a/lib/Target/PowerPC/Makefile b/lib/Target/PowerPC/Makefile new file mode 100644 index 00000000000..ef9fff7377b --- /dev/null +++ b/lib/Target/PowerPC/Makefile @@ -0,0 +1,55 @@ +##===- lib/Target/PowerPC/Makefile -------------------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file was developed by the LLVM research group and is distributed under +# the University of Illinois Open Source License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## +LEVEL = ../../.. +LIBRARYNAME = powerpc +include $(LEVEL)/Makefile.common + +# Make sure that tblgen is run, first thing. +$(SourceDepend): PowerPCGenRegisterInfo.h.inc PowerPCGenRegisterNames.inc \ + PowerPCGenRegisterInfo.inc PowerPCGenInstrNames.inc \ + PowerPCGenInstrInfo.inc PowerPCGenInstrSelector.inc + +PowerPCGenRegisterNames.inc:: $(SourceDir)/PowerPC.td \ + $(SourceDir)/PowerPCReg.td \ + $(SourceDir)/../Target.td $(TBLGEN) + @echo "Building PowerPC.td register names with tblgen" + $(VERB) $(TBLGEN) -I $(BUILD_SRC_DIR) $< -gen-register-enums -o $@ + +PowerPCGenRegisterInfo.h.inc:: $(SourceDir)/PowerPC.td \ + $(SourceDir)/PowerPCReg.td \ + $(SourceDir)/../Target.td $(TBLGEN) + @echo "Building PowerPC.td register information header with tblgen" + $(VERB) $(TBLGEN) -I $(BUILD_SRC_DIR) $< -gen-register-desc-header -o $@ + +PowerPCGenRegisterInfo.inc:: $(SourceDir)/PowerPC.td \ + $(SourceDir)/PowerPCReg.td \ + $(SourceDir)/../Target.td $(TBLGEN) + @echo "Building PowerPC.td register information implementation with tblgen" + $(VERB) $(TBLGEN) -I $(BUILD_SRC_DIR) $< -gen-register-desc -o $@ + +PowerPCGenInstrNames.inc:: $(SourceDir)/PowerPC.td \ + $(SourceDir)/PowerPCInstrs.td \ + $(SourceDir)/../Target.td $(TBLGEN) + @echo "Building PowerPC.td instruction names with tblgen" + $(VERB) $(TBLGEN) -I $(BUILD_SRC_DIR) $< -gen-instr-enums -o $@ + +PowerPCGenInstrInfo.inc:: $(SourceDir)/PowerPC.td \ + $(SourceDir)/PowerPCInstrs.td \ + $(SourceDir)/../Target.td $(TBLGEN) + @echo "Building PowerPC.td instruction information with tblgen" + $(VERB) $(TBLGEN) -I $(BUILD_SRC_DIR) $< -gen-instr-desc -o $@ + +PowerPCGenInstrSelector.inc:: $(SourceDir)/PowerPC.td \ + $(SourceDir)/PowerPCInstrs.td \ + $(SourceDir)/../Target.td $(TBLGEN) + @echo "Building PowerPC.td instruction selector with tblgen" + $(VERB) $(TBLGEN) -I $(BUILD_SRC_DIR) $< -gen-instr-selector -o $@ + +clean:: + $(VERB) rm -f *.inc diff --git a/lib/Target/PowerPC/PPC.h b/lib/Target/PowerPC/PPC.h new file mode 100644 index 00000000000..2f2b990f1e5 --- /dev/null +++ b/lib/Target/PowerPC/PPC.h @@ -0,0 +1,40 @@ +//===-- PowerPC.h - Top-level interface for PowerPC representation -*- C++ -*-// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the entry points for global functions defined in the LLVM +// PowerPC back-end. +// +//===----------------------------------------------------------------------===// + +#ifndef TARGET_POWERPC_H +#define TARGET_POWERPC_H + +#include + +namespace llvm { + +class FunctionPass; +class TargetMachine; + +// Here is where you would define factory methods for powerpc-specific +// passes. 
+// For example:
+FunctionPass *createPPCSimpleInstructionSelector (TargetMachine &TM);
+FunctionPass *createPPCCodePrinterPass(std::ostream &OS, TargetMachine &TM);
+} // end namespace llvm;
+
+// Defines symbolic names for PowerPC registers.  This defines a mapping from
+// register name to register number.
+//
+#include "PowerPCGenRegisterNames.inc"
+
+// Defines symbolic names for the PowerPC instructions.
+//
+#include "PowerPCGenInstrNames.inc"
+
+#endif
diff --git a/lib/Target/PowerPC/PPC32AsmPrinter.cpp b/lib/Target/PowerPC/PPC32AsmPrinter.cpp
new file mode 100644
index 00000000000..697be0907c2
--- /dev/null
+++ b/lib/Target/PowerPC/PPC32AsmPrinter.cpp
@@ -0,0 +1,694 @@
+//===-- PPC32AsmPrinter.cpp - Convert PowerPC LLVM code to assembly ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal
+// representation of machine-dependent LLVM code to Darwin-syntax PowerPC
+// assembly language. This printer is the output mechanism used
+// by `llc' and `lli -print-machineinstrs' on PowerPC.
+//
+//===----------------------------------------------------------------------===//
+
+#include <set>
+
+#include "PowerPC.h"
+#include "PowerPCInstrInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/Mangler.h"
+#include "Support/Statistic.h"
+#include "Support/StringExtras.h"
+#include "Support/CommandLine.h"
+
+namespace llvm {
+
+namespace {
+  Statistic<> EmittedInsts("asm-printer", "Number of machine instrs printed");
+
+  struct Printer : public MachineFunctionPass {
+    /// Output stream on which we're printing assembly code.
+    ///
+    std::ostream &O;
+
+    /// Target machine description which we query for reg. names, data
+    /// layout, etc.
+    ///
+    TargetMachine &TM;
+
+    /// Name-mangler for global names.
+    ///
+    Mangler *Mang;
+    std::set<std::string> Stubs;
+    std::set<std::string> Strings;
+
+    Printer(std::ostream &o, TargetMachine &tm) : O(o), TM(tm) { }
+
+    /// We name each basic block in a Function with a unique number, so
+    /// that we can consistently refer to them later.  This is cleared
+    /// at the beginning of each call to runOnMachineFunction().
+    ///
+    typedef std::map<const Value *, unsigned> ValueMapTy;
+    ValueMapTy NumberForBB;
+
+    /// Cache of mangled name for current function.  This is
+    /// recalculated at the beginning of each call to
+    /// runOnMachineFunction().
+ /// + std::string CurrentFnName; + + virtual const char *getPassName() const { + return "PowerPC Assembly Printer"; + } + + void printMachineInstruction(const MachineInstr *MI); + void printOp(const MachineOperand &MO, + bool elideOffsetKeyword = false); + void printConstantPool(MachineConstantPool *MCP); + bool runOnMachineFunction(MachineFunction &F); + bool doInitialization(Module &M); + bool doFinalization(Module &M); + void emitGlobalConstant(const Constant* CV); + void emitConstantValueOnly(const Constant *CV); + }; +} // end of anonymous namespace + +/// createPPCCodePrinterPass - Returns a pass that prints the X86 +/// assembly code for a MachineFunction to the given output stream, +/// using the given target machine description. This should work +/// regardless of whether the function is in SSA form. +/// +FunctionPass *createPPCCodePrinterPass(std::ostream &o,TargetMachine &tm){ + return new Printer(o, tm); +} + +/// isStringCompatible - Can we treat the specified array as a string? +/// Only if it is an array of ubytes or non-negative sbytes. +/// +static bool isStringCompatible(const ConstantArray *CVA) { + const Type *ETy = cast(CVA->getType())->getElementType(); + if (ETy == Type::UByteTy) return true; + if (ETy != Type::SByteTy) return false; + + for (unsigned i = 0; i < CVA->getNumOperands(); ++i) + if (cast(CVA->getOperand(i))->getValue() < 0) + return false; + + return true; +} + +/// toOctal - Convert the low order bits of X into an octal digit. +/// +static inline char toOctal(int X) { + return (X&7)+'0'; +} + +/// getAsCString - Return the specified array as a C compatible +/// string, only if the predicate isStringCompatible is true. +/// +static void printAsCString(std::ostream &O, const ConstantArray *CVA) { + assert(isStringCompatible(CVA) && "Array is not string compatible!"); + + O << "\""; + for (unsigned i = 0; i < CVA->getNumOperands(); ++i) { + unsigned char C = cast(CVA->getOperand(i))->getRawValue(); + + if (C == '"') { + O << "\\\""; + } else if (C == '\\') { + O << "\\\\"; + } else if (isprint(C)) { + O << C; + } else { + switch(C) { + case '\b': O << "\\b"; break; + case '\f': O << "\\f"; break; + case '\n': O << "\\n"; break; + case '\r': O << "\\r"; break; + case '\t': O << "\\t"; break; + default: + O << '\\'; + O << toOctal(C >> 6); + O << toOctal(C >> 3); + O << toOctal(C >> 0); + break; + } + } + } + O << "\""; +} + +// Print out the specified constant, without a storage class. Only the +// constants valid in constant expressions can occur here. +void Printer::emitConstantValueOnly(const Constant *CV) { + if (CV->isNullValue()) + O << "0"; + else if (const ConstantBool *CB = dyn_cast(CV)) { + assert(CB == ConstantBool::True); + O << "1"; + } else if (const ConstantSInt *CI = dyn_cast(CV)) + O << CI->getValue(); + else if (const ConstantUInt *CI = dyn_cast(CV)) + O << CI->getValue(); + else if (const ConstantPointerRef *CPR = dyn_cast(CV)) + // This is a constant address for a global variable or function. Use the + // name of the variable or function as the address value. 
+ O << Mang->getValueName(CPR->getValue()); + else if (const ConstantExpr *CE = dyn_cast(CV)) { + const TargetData &TD = TM.getTargetData(); + switch(CE->getOpcode()) { + case Instruction::GetElementPtr: { + // generate a symbolic expression for the byte address + const Constant *ptrVal = CE->getOperand(0); + std::vector idxVec(CE->op_begin()+1, CE->op_end()); + if (unsigned Offset = TD.getIndexedOffset(ptrVal->getType(), idxVec)) { + O << "("; + emitConstantValueOnly(ptrVal); + O << ") + " << Offset; + } else { + emitConstantValueOnly(ptrVal); + } + break; + } + case Instruction::Cast: { + // Support only non-converting or widening casts for now, that is, ones + // that do not involve a change in value. This assertion is really gross, + // and may not even be a complete check. + Constant *Op = CE->getOperand(0); + const Type *OpTy = Op->getType(), *Ty = CE->getType(); + + // Remember, kids, pointers on x86 can be losslessly converted back and + // forth into 32-bit or wider integers, regardless of signedness. :-P + assert(((isa(OpTy) + && (Ty == Type::LongTy || Ty == Type::ULongTy + || Ty == Type::IntTy || Ty == Type::UIntTy)) + || (isa(Ty) + && (OpTy == Type::LongTy || OpTy == Type::ULongTy + || OpTy == Type::IntTy || OpTy == Type::UIntTy)) + || (((TD.getTypeSize(Ty) >= TD.getTypeSize(OpTy)) + && OpTy->isLosslesslyConvertibleTo(Ty)))) + && "FIXME: Don't yet support this kind of constant cast expr"); + O << "("; + emitConstantValueOnly(Op); + O << ")"; + break; + } + case Instruction::Add: + O << "("; + emitConstantValueOnly(CE->getOperand(0)); + O << ") + ("; + emitConstantValueOnly(CE->getOperand(1)); + O << ")"; + break; + default: + assert(0 && "Unsupported operator!"); + } + } else { + assert(0 && "Unknown constant value!"); + } +} + +// Print a constant value or values, with the appropriate storage class as a +// prefix. +void Printer::emitGlobalConstant(const Constant *CV) { + const TargetData &TD = TM.getTargetData(); + + if (CV->isNullValue()) { + O << "\t.space\t " << TD.getTypeSize(CV->getType()) << "\n"; + return; + } else if (const ConstantArray *CVA = dyn_cast(CV)) { + if (isStringCompatible(CVA)) { + O << ".ascii"; + printAsCString(O, CVA); + O << "\n"; + } else { // Not a string. Print the values in successive locations + const std::vector &constValues = CVA->getValues(); + for (unsigned i=0; i < constValues.size(); i++) + emitGlobalConstant(cast(constValues[i].get())); + } + return; + } else if (const ConstantStruct *CVS = dyn_cast(CV)) { + // Print the fields in successive locations. Pad to align if needed! + const StructLayout *cvsLayout = TD.getStructLayout(CVS->getType()); + const std::vector& constValues = CVS->getValues(); + unsigned sizeSoFar = 0; + for (unsigned i=0, N = constValues.size(); i < N; i++) { + const Constant* field = cast(constValues[i].get()); + + // Check if padding is needed and insert one or more 0s. + unsigned fieldSize = TD.getTypeSize(field->getType()); + unsigned padSize = ((i == N-1? cvsLayout->StructSize + : cvsLayout->MemberOffsets[i+1]) + - cvsLayout->MemberOffsets[i]) - fieldSize; + sizeSoFar += fieldSize + padSize; + + // Now print the actual field value + emitGlobalConstant(field); + + // Insert the field padding unless it's zero bytes... 
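      // (Illustrative aside, not part of the original patch: for a struct laid
      // out as { sbyte, int } with MemberOffsets = {0, 4} and StructSize = 8,
      // the sbyte field gets padSize = (4 - 0) - 1 = 3, so a ".space 3"
      // directive follows it, while the trailing int gets
      // padSize = (8 - 4) - 4 = 0 and no padding at all.)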
+ if (padSize) + O << "\t.space\t " << padSize << "\n"; + } + assert(sizeSoFar == cvsLayout->StructSize && + "Layout of constant struct may be incorrect!"); + return; + } else if (const ConstantFP *CFP = dyn_cast(CV)) { + // FP Constants are printed as integer constants to avoid losing + // precision... + double Val = CFP->getValue(); + switch (CFP->getType()->getPrimitiveID()) { + default: assert(0 && "Unknown floating point type!"); + case Type::FloatTyID: { + union FU { // Abide by C TBAA rules + float FVal; + unsigned UVal; + } U; + U.FVal = Val; + O << ".long\t" << U.UVal << "\t# float " << Val << "\n"; + return; + } + case Type::DoubleTyID: { + union DU { // Abide by C TBAA rules + double FVal; + uint64_t UVal; + struct { + uint32_t MSWord; + uint32_t LSWord; + } T; + } U; + U.FVal = Val; + + O << ".long\t" << U.T.MSWord << "\t# double most significant word " << Val << "\n"; + O << ".long\t" << U.T.LSWord << "\t# double least significant word" << Val << "\n"; + return; + } + } + } else if (CV->getType()->getPrimitiveSize() == 64) { + const ConstantInt *CI = dyn_cast(CV); + if(CI) { + union DU { // Abide by C TBAA rules + int64_t UVal; + struct { + uint32_t MSWord; + uint32_t LSWord; + } T; + } U; + U.UVal = CI->getRawValue(); + + O << ".long\t" << U.T.MSWord << "\t# Double-word most significant word " << U.UVal << "\n"; + O << ".long\t" << U.T.LSWord << "\t# Double-word least significant word" << U.UVal << "\n"; + return; + } + } + + const Type *type = CV->getType(); + O << "\t"; + switch (type->getPrimitiveID()) { + case Type::UByteTyID: case Type::SByteTyID: + O << ".byte"; + break; + case Type::UShortTyID: case Type::ShortTyID: + O << ".short"; + break; + case Type::BoolTyID: + case Type::PointerTyID: + case Type::UIntTyID: case Type::IntTyID: + O << ".long"; + break; + case Type::ULongTyID: case Type::LongTyID: + assert (0 && "Should have already output double-word constant."); + case Type::FloatTyID: case Type::DoubleTyID: + assert (0 && "Should have already output floating point constant."); + default: + assert (0 && "Can't handle printing this type of thing"); + break; + } + O << "\t"; + emitConstantValueOnly(CV); + O << "\n"; +} + +/// printConstantPool - Print to the current output stream assembly +/// representations of the constants in the constant pool MCP. This is +/// used to print out constants which have been "spilled to memory" by +/// the code generator. +/// +void Printer::printConstantPool(MachineConstantPool *MCP) { + const std::vector &CP = MCP->getConstants(); + const TargetData &TD = TM.getTargetData(); + + if (CP.empty()) return; + + for (unsigned i = 0, e = CP.size(); i != e; ++i) { + O << "\t.const\n"; + O << "\t.align " << (unsigned)TD.getTypeAlignment(CP[i]->getType()) + << "\n"; + O << ".CPI" << CurrentFnName << "_" << i << ":\t\t\t\t\t#" + << *CP[i] << "\n"; + emitGlobalConstant(CP[i]); + } +} + +/// runOnMachineFunction - This uses the printMachineInstruction() +/// method to print assembly for each instruction. +/// +bool Printer::runOnMachineFunction(MachineFunction &MF) { + // BBNumber is used here so that a given Printer will never give two + // BBs the same name. (If you have a better way, please let me know!) + static unsigned BBNumber = 0; + + O << "\n\n"; + // What's my mangled name? + CurrentFnName = Mang->getValueName(MF.getFunction()); + + // Print out constants referenced by the function + printConstantPool(MF.getConstantPool()); + + // Print out labels for the function. 
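  // (Illustrative aside, not from the patch: assuming the mangler turns main
  // into _main, the directives emitted just below come out roughly as
  //     .text
  //     .globl  _main
  //     .align 5
  // _main:
  // before the per-basic-block labels and instructions follow.)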
+ O << "\t.text\n"; + O << "\t.globl\t" << CurrentFnName << "\n"; + O << "\t.align 5\n"; + O << CurrentFnName << ":\n"; + + // Number each basic block so that we can consistently refer to them + // in PC-relative references. + NumberForBB.clear(); + for (MachineFunction::const_iterator I = MF.begin(), E = MF.end(); + I != E; ++I) { + NumberForBB[I->getBasicBlock()] = BBNumber++; + } + + // Print out code for the function. + for (MachineFunction::const_iterator I = MF.begin(), E = MF.end(); + I != E; ++I) { + // Print a label for the basic block. + O << "L" << NumberForBB[I->getBasicBlock()] << ":\t# " + << I->getBasicBlock()->getName() << "\n"; + for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end(); + II != E; ++II) { + // Print the assembly for the instruction. + O << "\t"; + printMachineInstruction(II); + } + } + + // We didn't modify anything. + return false; +} + + + +void Printer::printOp(const MachineOperand &MO, + bool elideOffsetKeyword /* = false */) { + const MRegisterInfo &RI = *TM.getRegisterInfo(); + int new_symbol; + + switch (MO.getType()) { + case MachineOperand::MO_VirtualRegister: + if (Value *V = MO.getVRegValueOrNull()) { + O << "<" << V->getName() << ">"; + return; + } + // FALLTHROUGH + case MachineOperand::MO_MachineRegister: + O << RI.get(MO.getReg()).Name; + return; + + case MachineOperand::MO_SignExtendedImmed: + case MachineOperand::MO_UnextendedImmed: + O << (int)MO.getImmedValue(); + return; + case MachineOperand::MO_MachineBasicBlock: { + MachineBasicBlock *MBBOp = MO.getMachineBasicBlock(); + O << ".LBB" << Mang->getValueName(MBBOp->getParent()->getFunction()) + << "_" << MBBOp->getNumber () << "\t# " + << MBBOp->getBasicBlock ()->getName (); + return; + } + case MachineOperand::MO_PCRelativeDisp: + std::cerr << "Shouldn't use addPCDisp() when building PPC MachineInstrs"; + abort (); + return; + case MachineOperand::MO_GlobalAddress: + if (!elideOffsetKeyword) { + if(isa(MO.getGlobal())) { + Stubs.insert(Mang->getValueName(MO.getGlobal())); + O << "L" << Mang->getValueName(MO.getGlobal()) << "$stub"; + } else { + O << Mang->getValueName(MO.getGlobal()); + } + } + return; + case MachineOperand::MO_ExternalSymbol: + O << MO.getSymbolName(); + return; + default: + O << ""; return; + } +} + +#if 0 +static inline +unsigned int ValidOpcodes(const MachineInstr *MI, unsigned int ArgType[5]) { + int i; + unsigned int retval = 1; + + for(i = 0; i<5; i++) { + switch(ArgType[i]) { + case none: + break; + case Gpr: + case Gpr0: + Type::UIntTy + case Simm16: + case Zimm16: + case PCRelimm24: + case Imm24: + case Imm5: + case PCRelimm14: + case Imm14: + case Imm2: + case Crf: + case Imm3: + case Imm1: + case Fpr: + case Imm4: + case Imm8: + case Disimm16: + case Spr: + case Sgr: + }; + + } + } +} +#endif + +/// printMachineInstruction -- Print out a single PPC32 LLVM instruction +/// MI in Darwin syntax to the current output stream. 
+/// +void Printer::printMachineInstruction(const MachineInstr *MI) { + unsigned Opcode = MI->getOpcode(); + const TargetInstrInfo &TII = *TM.getInstrInfo(); + const TargetInstrDescriptor &Desc = TII.get(Opcode); + unsigned int i; + + unsigned int ArgCount = Desc.TSFlags & PPC32II::ArgCountMask; + unsigned int ArgType[5]; + + + ArgType[0] = (Desc.TSFlags>>PPC32II::Arg0TypeShift) & PPC32II::ArgTypeMask; + ArgType[1] = (Desc.TSFlags>>PPC32II::Arg1TypeShift) & PPC32II::ArgTypeMask; + ArgType[2] = (Desc.TSFlags>>PPC32II::Arg2TypeShift) & PPC32II::ArgTypeMask; + ArgType[3] = (Desc.TSFlags>>PPC32II::Arg3TypeShift) & PPC32II::ArgTypeMask; + ArgType[4] = (Desc.TSFlags>>PPC32II::Arg4TypeShift) & PPC32II::ArgTypeMask; + + assert ( ((Desc.TSFlags & PPC32II::VMX) == 0) && "Instruction requires VMX support"); + assert ( ((Desc.TSFlags & PPC32II::PPC64) == 0) && "Instruction requires 64 bit support"); + //assert ( ValidOpcodes(MI, ArgType) && "Instruction has invalid inputs"); + ++EmittedInsts; + + if(Opcode == PPC32::MovePCtoLR) { + O << "mflr r0\n"; + O << "bcl 20,31,L" << CurrentFnName << "$pb\n"; + O << "L" << CurrentFnName << "$pb:\n"; + return; + } + + O << TII.getName(MI->getOpcode()) << " "; + std::cout << TII.getName(MI->getOpcode()) << " expects " << ArgCount << " args\n"; + + if(Opcode == PPC32::LOADLoAddr) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << ", lo16("; + printOp(MI->getOperand(2)); + O << "-L" << CurrentFnName << "$pb)\n"; + return; + } + + if(Opcode == PPC32::LOADHiAddr) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << ", ha16(" ; + printOp(MI->getOperand(2)); + O << "-L" << CurrentFnName << "$pb)\n"; + return; + } + + if( (ArgCount == 3) && (ArgType[1] == PPC32II::Disimm16) ) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << "("; + if((ArgType[2] == PPC32II::Gpr0) && (MI->getOperand(2).getReg() == PPC32::R0)) { + O << "0"; + } else { + printOp(MI->getOperand(2)); + } + O << ")\n"; + } else { + for(i = 0; i< ArgCount; i++) { + if( (ArgType[i] == PPC32II::Gpr0) && ((MI->getOperand(i).getReg()) == PPC32::R0)) { + O << "0"; + } else { + //std::cout << "DEBUG " << (*(TM.getRegisterInfo())).get(MI->getOperand(i).getReg()).Name << "\n"; + printOp(MI->getOperand(i)); + } + if( ArgCount - 1 == i) { + O << "\n"; + } else { + O << ", "; + } + } + } + + return; +} + +bool Printer::doInitialization(Module &M) { + // Tell gas we are outputting Intel syntax (not AT&T syntax) assembly. + // + // Bug: gas in `intel_syntax noprefix' mode interprets the symbol `Sp' in an + // instruction as a reference to the register named sp, and if you try to + // reference a symbol `Sp' (e.g. `mov ECX, OFFSET Sp') then it gets lowercased + // before being looked up in the symbol table. This creates spurious + // `undefined symbol' errors when linking. Workaround: Do not use `noprefix' + // mode, and decorate all register names with percent signs. + // O << "\t.intel_syntax\n"; + Mang = new Mangler(M, true); + return false; // success +} + +// SwitchSection - Switch to the specified section of the executable if we are +// not already in it! 
+// +static void SwitchSection(std::ostream &OS, std::string &CurSection, + const char *NewSection) { + if (CurSection != NewSection) { + CurSection = NewSection; + if (!CurSection.empty()) + OS << "\t" << NewSection << "\n"; + } +} + +bool Printer::doFinalization(Module &M) { + const TargetData &TD = TM.getTargetData(); + std::string CurSection; + + // Print out module-level global variables here. + for (Module::const_giterator I = M.gbegin(), E = M.gend(); I != E; ++I) + if (I->hasInitializer()) { // External global require no code + O << "\n\n"; + std::string name = Mang->getValueName(I); + Constant *C = I->getInitializer(); + unsigned Size = TD.getTypeSize(C->getType()); + unsigned Align = TD.getTypeAlignment(C->getType()); + + if (C->isNullValue() && + (I->hasLinkOnceLinkage() || I->hasInternalLinkage() || + I->hasWeakLinkage() /* FIXME: Verify correct */)) { + SwitchSection(O, CurSection, ".data"); + if (I->hasInternalLinkage()) + O << "\t.local " << name << "\n"; + + O << "\t.comm " << name << "," << TD.getTypeSize(C->getType()) + << "," << (unsigned)TD.getTypeAlignment(C->getType()); + O << "\t\t# "; + WriteAsOperand(O, I, true, true, &M); + O << "\n"; + } else { + switch (I->getLinkage()) { + case GlobalValue::LinkOnceLinkage: + case GlobalValue::WeakLinkage: // FIXME: Verify correct for weak. + // Nonnull linkonce -> weak + O << "\t.weak " << name << "\n"; + SwitchSection(O, CurSection, ""); + O << "\t.section\t.llvm.linkonce.d." << name << ",\"aw\",@progbits\n"; + break; + + case GlobalValue::AppendingLinkage: + // FIXME: appending linkage variables should go into a section of + // their name or something. For now, just emit them as external. + case GlobalValue::ExternalLinkage: + // If external or appending, declare as a global symbol + O << "\t.globl " << name << "\n"; + // FALL THROUGH + case GlobalValue::InternalLinkage: + if (C->isNullValue()) + SwitchSection(O, CurSection, ".bss"); + else + SwitchSection(O, CurSection, ".data"); + break; + } + + O << "\t.align " << Align << "\n"; + O << name << ":\t\t\t\t# "; + WriteAsOperand(O, I, true, true, &M); + O << " = "; + WriteAsOperand(O, C, false, false, &M); + O << "\n"; + emitGlobalConstant(C); + } + } + + for(std::set::iterator i = Stubs.begin(); i != Stubs.end(); ++i) { + O << ".data\n"; + O << ".section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32\n"; + O << "\t.align 2\n"; + O << "L" << *i << "$stub:\n"; + O << "\t.indirect_symbol " << *i << "\n"; + O << "\tmflr r0\n"; + O << "\tbcl 20,31,L0$" << *i << "\n"; + O << "L0$" << *i << ":\n"; + O << "\tmflr r11\n"; + O << "\taddis r11,r11,ha16(L" << *i << "$lazy_ptr-L0$" << *i << ")\n"; + O << "\tmtlr r0\n"; + O << "\tlwzu r12,lo16(L" << *i << "$lazy_ptr-L0$" << *i << ")(r11)\n"; + O << "\tmtctr r12\n"; + O << "\tbctr\n"; + O << ".data\n"; + O << ".lazy_symbol_pointer\n"; + O << "L" << *i << "$lazy_ptr:\n"; + O << ".indirect_symbol " << *i << "\n"; + O << ".long dyld_stub_binding_helper\n"; + + } + + delete Mang; + return false; // success +} + +} // End llvm namespace diff --git a/lib/Target/PowerPC/PPC32ISelSimple.cpp b/lib/Target/PowerPC/PPC32ISelSimple.cpp new file mode 100644 index 00000000000..c9c0e2d870a --- /dev/null +++ b/lib/Target/PowerPC/PPC32ISelSimple.cpp @@ -0,0 +1,2621 @@ +//===-- InstSelectSimple.cpp - A simple instruction selector for PowerPC --===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "PowerPC.h" +#include "PowerPCInstrBuilder.h" +#include "PowerPCInstrInfo.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/Instructions.h" +#include "llvm/IntrinsicLowering.h" +#include "llvm/Pass.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/SSARegMap.h" +#include "llvm/Target/MRegisterInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/InstVisitor.h" +using namespace llvm; + +namespace { + /// TypeClass - Used by the PowerPC backend to group LLVM types by their basic PPC + /// Representation. + /// + enum TypeClass { + cByte, cShort, cInt, cFP, cLong + }; +} + +/// getClass - Turn a primitive type into a "class" number which is based on the +/// size of the type, and whether or not it is floating point. +/// +static inline TypeClass getClass(const Type *Ty) { + switch (Ty->getPrimitiveID()) { + case Type::SByteTyID: + case Type::UByteTyID: return cByte; // Byte operands are class #0 + case Type::ShortTyID: + case Type::UShortTyID: return cShort; // Short operands are class #1 + case Type::IntTyID: + case Type::UIntTyID: + case Type::PointerTyID: return cInt; // Int's and pointers are class #2 + + case Type::FloatTyID: + case Type::DoubleTyID: return cFP; // Floating Point is #3 + + case Type::LongTyID: + case Type::ULongTyID: return cLong; // Longs are class #4 + default: + assert(0 && "Invalid type to getClass!"); + return cByte; // not reached + } +} + +// getClassB - Just like getClass, but treat boolean values as ints. +static inline TypeClass getClassB(const Type *Ty) { + if (Ty == Type::BoolTy) return cInt; + return getClass(Ty); +} + +namespace { + struct ISel : public FunctionPass, InstVisitor { + TargetMachine &TM; + MachineFunction *F; // The function we are compiling into + MachineBasicBlock *BB; // The current MBB we are compiling + int VarArgsFrameIndex; // FrameIndex for start of varargs area + int ReturnAddressIndex; // FrameIndex for the return address + + std::map RegMap; // Mapping between Val's and SSA Regs + + // MBBMap - Mapping between LLVM BB -> Machine BB + std::map MBBMap; + + // AllocaMap - Mapping from fixed sized alloca instructions to the + // FrameIndex for the alloca. + std::map AllocaMap; + + ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {} + + /// runOnFunction - Top level implementation of instruction selection for + /// the entire function. + /// + bool runOnFunction(Function &Fn) { + // First pass over the function, lower any unknown intrinsic functions + // with the IntrinsicLowering class. + LowerUnknownIntrinsicFunctionCalls(Fn); + + F = &MachineFunction::construct(&Fn, TM); + + // Create all of the machine basic blocks for the function... + for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) + F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I)); + + BB = &F->front(); + + // Set up a frame object for the return address. This is used by the + // llvm.returnaddress & llvm.frameaddress intrinisics. + ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4); + + // Copy incoming arguments off of the stack... 
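      // (Illustrative aside, not from the patch: this target passes the first
      // eight integer argument words in r3-r10 and the first thirteen FP
      // arguments in f1-f13, which is why LoadArgumentsToVirtualRegs below
      // either copies an argument out of its register or reloads it from the
      // corresponding fixed stack slot.)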
+ LoadArgumentsToVirtualRegs(Fn); + + // Instruction select everything except PHI nodes + visit(Fn); + + // Select the PHI nodes + SelectPHINodes(); + + RegMap.clear(); + MBBMap.clear(); + AllocaMap.clear(); + F = 0; + // We always build a machine code representation for the function + return true; + } + + virtual const char *getPassName() const { + return "PowerPC Simple Instruction Selection"; + } + + /// visitBasicBlock - This method is called when we are visiting a new basic + /// block. This simply creates a new MachineBasicBlock to emit code into + /// and adds it to the current MachineFunction. Subsequent visit* for + /// instructions will be invoked for all instructions in the basic block. + /// + void visitBasicBlock(BasicBlock &LLVM_BB) { + BB = MBBMap[&LLVM_BB]; + } + + /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the + /// function, lowering any calls to unknown intrinsic functions into the + /// equivalent LLVM code. + /// + void LowerUnknownIntrinsicFunctionCalls(Function &F); + + /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function + /// from the stack into virtual registers. + /// + void LoadArgumentsToVirtualRegs(Function &F); + + /// SelectPHINodes - Insert machine code to generate phis. This is tricky + /// because we have to generate our sources into the source basic blocks, + /// not the current one. + /// + void SelectPHINodes(); + + // Visitation methods for various instructions. These methods simply emit + // fixed PowerPC code for each instruction. + + // Control flow operators + void visitReturnInst(ReturnInst &RI); + void visitBranchInst(BranchInst &BI); + + struct ValueRecord { + Value *Val; + unsigned Reg; + const Type *Ty; + ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {} + ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {} + }; + void doCall(const ValueRecord &Ret, MachineInstr *CallMI, + const std::vector &Args); + void visitCallInst(CallInst &I); + void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I); + + // Arithmetic operators + void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass); + void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); } + void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); } + void visitMul(BinaryOperator &B); + + void visitDiv(BinaryOperator &B) { visitDivRem(B); } + void visitRem(BinaryOperator &B) { visitDivRem(B); } + void visitDivRem(BinaryOperator &B); + + // Bitwise operators + void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); } + void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); } + void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); } + + // Comparison operators... 
+ void visitSetCondInst(SetCondInst &I); + unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1, + MachineBasicBlock *MBB, + MachineBasicBlock::iterator MBBI); + void visitSelectInst(SelectInst &SI); + + + // Memory Instructions + void visitLoadInst(LoadInst &I); + void visitStoreInst(StoreInst &I); + void visitGetElementPtrInst(GetElementPtrInst &I); + void visitAllocaInst(AllocaInst &I); + void visitMallocInst(MallocInst &I); + void visitFreeInst(FreeInst &I); + + // Other operators + void visitShiftInst(ShiftInst &I); + void visitPHINode(PHINode &I) {} // PHI nodes handled by second pass + void visitCastInst(CastInst &I); + void visitVANextInst(VANextInst &I); + void visitVAArgInst(VAArgInst &I); + + void visitInstruction(Instruction &I) { + std::cerr << "Cannot instruction select: " << I; + abort(); + } + + /// promote32 - Make a value 32-bits wide, and put it somewhere. + /// + void promote32(unsigned targetReg, const ValueRecord &VR); + + /// emitGEPOperation - Common code shared between visitGetElementPtrInst and + /// constant expression GEP support. + /// + void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP, + Value *Src, User::op_iterator IdxBegin, + User::op_iterator IdxEnd, unsigned TargetReg); + + /// emitCastOperation - Common code shared between visitCastInst and + /// constant expression cast support. + /// + void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator IP, + Value *Src, const Type *DestTy, unsigned TargetReg); + + /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary + /// and constant expression support. + /// + void emitSimpleBinaryOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, + unsigned OperatorClass, unsigned TargetReg); + + /// emitBinaryFPOperation - This method handles emission of floating point + /// Add (0), Sub (1), Mul (2), and Div (3) operations. + void emitBinaryFPOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, + unsigned OperatorClass, unsigned TargetReg); + + void emitMultiply(MachineBasicBlock *BB, MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, unsigned TargetReg); + + void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI, + unsigned DestReg, const Type *DestTy, + unsigned Op0Reg, unsigned Op1Reg); + void doMultiplyConst(MachineBasicBlock *MBB, + MachineBasicBlock::iterator MBBI, + unsigned DestReg, const Type *DestTy, + unsigned Op0Reg, unsigned Op1Val); + + void emitDivRemOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, bool isDiv, + unsigned TargetReg); + + /// emitSetCCOperation - Common code shared between visitSetCondInst and + /// constant expression support. + /// + void emitSetCCOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, unsigned Opcode, + unsigned TargetReg); + + /// emitShiftOperation - Common code shared between visitShiftInst and + /// constant expression support. + /// + void emitShiftOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Op, Value *ShiftAmount, bool isLeftShift, + const Type *ResultTy, unsigned DestReg); + + /// emitSelectOperation - Common code shared between visitSelectInst and the + /// constant expression support. 
+ void emitSelectOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Cond, Value *TrueVal, Value *FalseVal, + unsigned DestReg); + + /// copyConstantToRegister - Output the instructions required to put the + /// specified constant into the specified register. + /// + void copyConstantToRegister(MachineBasicBlock *MBB, + MachineBasicBlock::iterator MBBI, + Constant *C, unsigned Reg); + + void emitUCOM(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI, + unsigned LHS, unsigned RHS); + + /// makeAnotherReg - This method returns the next register number we haven't + /// yet used. + /// + /// Long values are handled somewhat specially. They are always allocated + /// as pairs of 32 bit integer values. The register number returned is the + /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits + /// of the long value. + /// + unsigned makeAnotherReg(const Type *Ty) { + assert(dynamic_cast(TM.getRegisterInfo()) && + "Current target doesn't have PPC reg info??"); + const PowerPCRegisterInfo *MRI = + static_cast(TM.getRegisterInfo()); + if (Ty == Type::LongTy || Ty == Type::ULongTy) { + const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy); + // Create the lower part + F->getSSARegMap()->createVirtualRegister(RC); + // Create the upper part. + return F->getSSARegMap()->createVirtualRegister(RC)-1; + } + + // Add the mapping of regnumber => reg class to MachineFunction + const TargetRegisterClass *RC = MRI->getRegClassForType(Ty); + return F->getSSARegMap()->createVirtualRegister(RC); + } + + /// getReg - This method turns an LLVM value into a register number. + /// + unsigned getReg(Value &V) { return getReg(&V); } // Allow references + unsigned getReg(Value *V) { + // Just append to the end of the current bb. + MachineBasicBlock::iterator It = BB->end(); + return getReg(V, BB, It); + } + unsigned getReg(Value *V, MachineBasicBlock *MBB, + MachineBasicBlock::iterator IPt); + + /// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca + /// that is to be statically allocated with the initial stack frame + /// adjustment. + unsigned getFixedSizedAllocaFI(AllocaInst *AI); + }; +} + +/// dyn_castFixedAlloca - If the specified value is a fixed size alloca +/// instruction in the entry block, return it. Otherwise, return a null +/// pointer. +static AllocaInst *dyn_castFixedAlloca(Value *V) { + if (AllocaInst *AI = dyn_cast(V)) { + BasicBlock *BB = AI->getParent(); + if (isa(AI->getArraySize()) && BB ==&BB->getParent()->front()) + return AI; + } + return 0; +} + +/// getReg - This method turns an LLVM value into a register number. +/// +unsigned ISel::getReg(Value *V, MachineBasicBlock *MBB, + MachineBasicBlock::iterator IPt) { + // If this operand is a constant, emit the code to copy the constant into + // the register here... + // + if (Constant *C = dyn_cast(V)) { + unsigned Reg = makeAnotherReg(V->getType()); + copyConstantToRegister(MBB, IPt, C, Reg); + return Reg; + } else if (GlobalValue *GV = dyn_cast(V)) { + unsigned Reg1 = makeAnotherReg(V->getType()); + unsigned Reg2 = makeAnotherReg(V->getType()); + // Move the address of the global into the register + BuildMI(*MBB, IPt, PPC32::LOADHiAddr, 2, Reg1).addReg(PPC32::R0).addGlobalAddress(GV); + BuildMI(*MBB, IPt, PPC32::LOADLoAddr, 2, Reg2).addReg(Reg1).addGlobalAddress(GV); + return Reg2; + } else if (CastInst *CI = dyn_cast(V)) { + // Do not emit noop casts at all. 
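    // (Illustrative aside, not from the patch: a cast from int to uint, or
    // from a pointer type to uint, stays within class cInt, so the operand's
    // existing register is simply reused and no instruction is emitted.)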
+ if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType())) + return getReg(CI->getOperand(0), MBB, IPt); + } else if (AllocaInst *AI = dyn_castFixedAlloca(V)) { + unsigned Reg = makeAnotherReg(V->getType()); + unsigned FI = getFixedSizedAllocaFI(AI); + addFrameReference(BuildMI(*MBB, IPt, PPC32::ADDI, 2, Reg), FI, 0, false); + return Reg; + } + + unsigned &Reg = RegMap[V]; + if (Reg == 0) { + Reg = makeAnotherReg(V->getType()); + RegMap[V] = Reg; + } + + return Reg; +} + +/// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca +/// that is to be statically allocated with the initial stack frame +/// adjustment. +unsigned ISel::getFixedSizedAllocaFI(AllocaInst *AI) { + // Already computed this? + std::map::iterator I = AllocaMap.lower_bound(AI); + if (I != AllocaMap.end() && I->first == AI) return I->second; + + const Type *Ty = AI->getAllocatedType(); + ConstantUInt *CUI = cast(AI->getArraySize()); + unsigned TySize = TM.getTargetData().getTypeSize(Ty); + TySize *= CUI->getValue(); // Get total allocated size... + unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty); + + // Create a new stack object using the frame manager... + int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment); + AllocaMap.insert(I, std::make_pair(AI, FrameIdx)); + return FrameIdx; +} + + +/// copyConstantToRegister - Output the instructions required to put the +/// specified constant into the specified register. +/// +void ISel::copyConstantToRegister(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Constant *C, unsigned R) { + if (ConstantExpr *CE = dyn_cast(C)) { + unsigned Class = 0; + switch (CE->getOpcode()) { + case Instruction::GetElementPtr: + emitGEPOperation(MBB, IP, CE->getOperand(0), + CE->op_begin()+1, CE->op_end(), R); + return; + case Instruction::Cast: + emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R); + return; + + case Instruction::Xor: ++Class; // FALL THROUGH + case Instruction::Or: ++Class; // FALL THROUGH + case Instruction::And: ++Class; // FALL THROUGH + case Instruction::Sub: ++Class; // FALL THROUGH + case Instruction::Add: + emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + Class, R); + return; + + case Instruction::Mul: + emitMultiply(MBB, IP, CE->getOperand(0), CE->getOperand(1), R); + return; + + case Instruction::Div: + case Instruction::Rem: + emitDivRemOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + CE->getOpcode() == Instruction::Div, R); + return; + + case Instruction::SetNE: + case Instruction::SetEQ: + case Instruction::SetLT: + case Instruction::SetGT: + case Instruction::SetLE: + case Instruction::SetGE: + emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + CE->getOpcode(), R); + return; + + case Instruction::Shl: + case Instruction::Shr: + emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + CE->getOpcode() == Instruction::Shl, CE->getType(), R); + return; + + case Instruction::Select: + emitSelectOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + CE->getOperand(2), R); + return; + + default: + std::cerr << "Offending expr: " << C << "\n"; + assert(0 && "Constant expression not yet handled!\n"); + } + } + + if (C->getType()->isIntegral()) { + unsigned Class = getClassB(C->getType()); + + if (Class == cLong) { + // Copy the value into the register pair. 
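      // (Illustrative aside, not part of the original patch: each 32-bit half
      // is materialized with the usual two-instruction PowerPC idiom, e.g.
      // 0x12345678 becomes
      //     addis rD, 0, 0x1234    ; high 16 bits, shifted into place
      //     ori   rD, rD, 0x5678   ; or in the low 16 bits
      // which is what the ADDIS/ORI pairs below do for each register of the
      // pair.)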
+ uint64_t Val = cast(C)->getRawValue(); + unsigned hiTmp = makeAnotherReg(Type::IntTy); + unsigned loTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::ADDIS, 2, loTmp).addReg(PPC32::R0).addImm(Val >> 48); + BuildMI(*MBB, IP, PPC32::ORI, 2, R).addReg(loTmp).addImm((Val >> 32) & 0xFFFF); + BuildMI(*MBB, IP, PPC32::ADDIS, 2, hiTmp).addReg(PPC32::R0).addImm((Val >> 16) & 0xFFFF); + BuildMI(*MBB, IP, PPC32::ORI, 2, R+1).addReg(hiTmp).addImm(Val & 0xFFFF); + return; + } + + assert(Class <= cInt && "Type not handled yet!"); + + if (C->getType() == Type::BoolTy) { + BuildMI(*MBB, IP, PPC32::ADDI, 2, R).addReg(PPC32::R0).addImm(C == ConstantBool::True); + } else if (Class == cByte || Class == cShort) { + ConstantInt *CI = cast(C); + BuildMI(*MBB, IP, PPC32::ADDI, 2, R).addReg(PPC32::R0).addImm(CI->getRawValue()); + } else { + ConstantInt *CI = cast(C); + int TheVal = CI->getRawValue() & 0xFFFFFFFF; + if (TheVal < 32768 && TheVal >= -32768) { + BuildMI(*MBB, IP, PPC32::ADDI, 2, R).addReg(PPC32::R0).addImm(CI->getRawValue()); + } else { + unsigned TmpReg = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::ADDIS, 2, TmpReg).addReg(PPC32::R0).addImm(CI->getRawValue() >> 16); + BuildMI(*MBB, IP, PPC32::ORI, 2, R).addReg(TmpReg).addImm(CI->getRawValue() & 0xFFFF); + } + } + } else if (ConstantFP *CFP = dyn_cast(C)) { + // We need to spill the constant to memory... + MachineConstantPool *CP = F->getConstantPool(); + unsigned CPI = CP->getConstantPoolIndex(CFP); + const Type *Ty = CFP->getType(); + + assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!"); + unsigned LoadOpcode = Ty == Type::FloatTy ? PPC32::LFS : PPC32::LFD; + addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 2, R), CPI); + } else if (isa(C)) { + // Copy zero (null pointer) to the register. + BuildMI(*MBB, IP, PPC32::ADDI, 2, R).addReg(PPC32::R0).addImm(0); + } else if (ConstantPointerRef *CPR = dyn_cast(C)) { + BuildMI(*MBB, IP, PPC32::ADDIS, 2, R).addReg(PPC32::R0).addGlobalAddress(CPR->getValue()); + BuildMI(*MBB, IP, PPC32::ORI, 2, R).addReg(PPC32::R0).addGlobalAddress(CPR->getValue()); + } else { + std::cerr << "Offending constant: " << C << "\n"; + assert(0 && "Type not handled yet!"); + } +} + +/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from +/// the stack into virtual registers. +/// +/// FIXME: When we can calculate which args are coming in via registers +/// source them from there instead. +void ISel::LoadArgumentsToVirtualRegs(Function &Fn) { + unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot + unsigned GPR_remaining = 8; + unsigned FPR_remaining = 13; + unsigned GPR_idx = 3; + unsigned FPR_idx = 1; + + MachineFrameInfo *MFI = F->getFrameInfo(); + + for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) { + bool ArgLive = !I->use_empty(); + unsigned Reg = ArgLive ? 
getReg(*I) : 0; + int FI; // Frame object index + + switch (getClassB(I->getType())) { + case cByte: + if (ArgLive) { + FI = MFI->CreateFixedObject(1, ArgOffset); + if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, Reg).addReg(PPC32::R0+GPR_idx).addReg(PPC32::R0+GPR_idx); + } else { + addFrameReference(BuildMI(BB, PPC32::LBZ, 2, Reg), FI); + } + } + break; + case cShort: + if (ArgLive) { + FI = MFI->CreateFixedObject(2, ArgOffset); + if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, Reg).addReg(PPC32::R0+GPR_idx).addReg(PPC32::R0+GPR_idx); + } else { + addFrameReference(BuildMI(BB, PPC32::LHZ, 2, Reg), FI); + } + } + break; + case cInt: + if (ArgLive) { + FI = MFI->CreateFixedObject(4, ArgOffset); + if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, Reg).addReg(PPC32::R0+GPR_idx).addReg(PPC32::R0+GPR_idx); + } else { + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, Reg), FI); + } + } + break; + case cLong: + if (ArgLive) { + FI = MFI->CreateFixedObject(8, ArgOffset); + if (GPR_remaining > 1) { + BuildMI(BB, PPC32::OR, 2, Reg).addReg(PPC32::R0+GPR_idx).addReg(PPC32::R0+GPR_idx); + BuildMI(BB, PPC32::OR, 2, Reg+1).addReg(PPC32::R0+GPR_idx+1).addReg(PPC32::R0+GPR_idx+1); + } else { + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, Reg), FI); + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, Reg+1), FI, 4); + } + } + ArgOffset += 4; // longs require 4 additional bytes + if (GPR_remaining > 1) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + break; + case cFP: + if (ArgLive) { + unsigned Opcode; + if (I->getType() == Type::FloatTy) { + Opcode = PPC32::LFS; + FI = MFI->CreateFixedObject(4, ArgOffset); + } else { + Opcode = PPC32::LFD; + FI = MFI->CreateFixedObject(8, ArgOffset); + } + if (FPR_remaining > 0) { + BuildMI(BB, PPC32::FMR, 1, Reg).addReg(PPC32::F0+FPR_idx); + FPR_remaining--; + FPR_idx++; + } else { + addFrameReference(BuildMI(BB, Opcode, 2, Reg), FI); + } + } + if (I->getType() == Type::DoubleTy) { + ArgOffset += 4; // doubles require 4 additional bytes + if (GPR_remaining > 0) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + } + break; + default: + assert(0 && "Unhandled argument type!"); + } + ArgOffset += 4; // Each argument takes at least 4 bytes on the stack... + if (GPR_remaining > 0) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + } + + // If the function takes variable number of arguments, add a frame offset for + // the start of the first vararg value... this is used to expand + // llvm.va_start. + if (Fn.getFunctionType()->isVarArg()) + VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset); +} + + +/// SelectPHINodes - Insert machine code to generate phis. This is tricky +/// because we have to generate our sources into the source basic blocks, not +/// the current one. +/// +void ISel::SelectPHINodes() { + const TargetInstrInfo &TII = *TM.getInstrInfo(); + const Function &LF = *F->getFunction(); // The LLVM function... + for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) { + const BasicBlock *BB = I; + MachineBasicBlock &MBB = *MBBMap[I]; + + // Loop over all of the PHI nodes in the LLVM basic block... + MachineBasicBlock::iterator PHIInsertPoint = MBB.begin(); + for (BasicBlock::const_iterator I = BB->begin(); + PHINode *PN = const_cast(dyn_cast(I)); ++I) { + + // Create a new machine instr PHI node, and insert it. 
+ unsigned PHIReg = getReg(*PN); + MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint, + PPC32::PHI, PN->getNumOperands(), PHIReg); + + MachineInstr *LongPhiMI = 0; + if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy) + LongPhiMI = BuildMI(MBB, PHIInsertPoint, + PPC32::PHI, PN->getNumOperands(), PHIReg+1); + + // PHIValues - Map of blocks to incoming virtual registers. We use this + // so that we only initialize one incoming value for a particular block, + // even if the block has multiple entries in the PHI node. + // + std::map PHIValues; + + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { + MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)]; + unsigned ValReg; + std::map::iterator EntryIt = + PHIValues.lower_bound(PredMBB); + + if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) { + // We already inserted an initialization of the register for this + // predecessor. Recycle it. + ValReg = EntryIt->second; + + } else { + // Get the incoming value into a virtual register. + // + Value *Val = PN->getIncomingValue(i); + + // If this is a constant or GlobalValue, we may have to insert code + // into the basic block to compute it into a virtual register. + if ((isa(Val) && !isa(Val)) || + isa(Val)) { + // Simple constants get emitted at the end of the basic block, + // before any terminator instructions. We "know" that the code to + // move a constant into a register will never clobber any flags. + ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator()); + } else { + // Because we don't want to clobber any values which might be in + // physical registers with the computation of this constant (which + // might be arbitrarily complex if it is a constant expression), + // just insert the computation at the top of the basic block. + MachineBasicBlock::iterator PI = PredMBB->begin(); + + // Skip over any PHI nodes though! + while (PI != PredMBB->end() && PI->getOpcode() == PPC32::PHI) + ++PI; + + ValReg = getReg(Val, PredMBB, PI); + } + + // Remember that we inserted a value for this PHI for this predecessor + PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg)); + } + + PhiMI->addRegOperand(ValReg); + PhiMI->addMachineBasicBlockOperand(PredMBB); + if (LongPhiMI) { + LongPhiMI->addRegOperand(ValReg+1); + LongPhiMI->addMachineBasicBlockOperand(PredMBB); + } + } + + // Now that we emitted all of the incoming values for the PHI node, make + // sure to reposition the InsertPoint after the PHI that we just added. + // This is needed because we might have inserted a constant into this + // block, right after the PHI's which is before the old insert point! + PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI; + ++PHIInsertPoint; + } + } +} + + +// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold +// it into the conditional branch or select instruction which is the only user +// of the cc instruction. This is the case if the conditional branch is the +// only user of the setcc, and if the setcc is in the same basic block as the +// conditional branch. We also don't handle long arguments below, so we reject +// them here as well. 
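// (Illustrative aside, not part of the original patch: the typical pattern
// this recognizes is
//     %tmp = setlt int %a, %b
//     br bool %tmp, label %then, label %else
// where the setcc has exactly one use, that use is the branch or select, and
// both instructions live in the same basic block.)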
+// +static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) { + if (SetCondInst *SCI = dyn_cast(V)) + if (SCI->hasOneUse()) { + Instruction *User = cast(SCI->use_back()); + if ((isa(User) || isa(User)) && + SCI->getParent() == User->getParent() && + (getClassB(SCI->getOperand(0)->getType()) != cLong || + SCI->getOpcode() == Instruction::SetEQ || + SCI->getOpcode() == Instruction::SetNE)) + return SCI; + } + return 0; +} + +// Return a fixed numbering for setcc instructions which does not depend on the +// order of the opcodes. +// +static unsigned getSetCCNumber(unsigned Opcode) { + switch(Opcode) { + default: assert(0 && "Unknown setcc instruction!"); + case Instruction::SetEQ: return 0; + case Instruction::SetNE: return 1; + case Instruction::SetLT: return 2; + case Instruction::SetGE: return 3; + case Instruction::SetGT: return 4; + case Instruction::SetLE: return 5; + } +} + +/// emitUCOM - emits an unordered FP compare. +void ISel::emitUCOM(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP, + unsigned LHS, unsigned RHS) { + BuildMI(*MBB, IP, PPC32::FCMPU, 2, PPC32::CR0).addReg(LHS).addReg(RHS); +} + +// EmitComparison - This function emits a comparison of the two operands, +// returning the extended setcc code to use. +unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1, + MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP) { + // The arguments are already supposed to be of the same type. + const Type *CompTy = Op0->getType(); + unsigned Class = getClassB(CompTy); + unsigned Op0r = getReg(Op0, MBB, IP); + + // Special case handling of: cmp R, i + if (isa(Op1)) { + BuildMI(*MBB, IP, PPC32::CMPI, 2, PPC32::CR0).addReg(Op0r).addImm(0); + } else if (ConstantInt *CI = dyn_cast(Op1)) { + if (Class == cByte || Class == cShort || Class == cInt) { + unsigned Op1v = CI->getRawValue(); + + // Mask off any upper bits of the constant, if there are any... + Op1v &= (1ULL << (8 << Class)) - 1; + + // Compare immediate or promote to reg? + if (Op1v <= 32767) { + BuildMI(*MBB, IP, CompTy->isSigned() ? PPC32::CMPI : PPC32::CMPLI, 3, PPC32::CR0).addImm(0).addReg(Op0r).addImm(Op1v); + } else { + unsigned Op1r = getReg(Op1, MBB, IP); + BuildMI(*MBB, IP, CompTy->isSigned() ? PPC32::CMP : PPC32::CMPL, 3, PPC32::CR0).addImm(0).addReg(Op0r).addReg(Op1r); + } + return OpNum; + } else { + assert(Class == cLong && "Unknown integer class!"); + unsigned LowCst = CI->getRawValue(); + unsigned HiCst = CI->getRawValue() >> 32; + if (OpNum < 2) { // seteq, setne + unsigned LoTmp = Op0r; + if (LowCst != 0) { + unsigned LoLow = makeAnotherReg(Type::IntTy); + unsigned LoTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::XORI, 2, LoLow).addReg(Op0r).addImm(LowCst); + BuildMI(*MBB, IP, PPC32::XORIS, 2, LoTmp).addReg(LoLow).addImm(LowCst >> 16); + } + unsigned HiTmp = Op0r+1; + if (HiCst != 0) { + unsigned HiLow = makeAnotherReg(Type::IntTy); + unsigned HiTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::XORI, 2, HiLow).addReg(Op0r+1).addImm(HiCst); + BuildMI(*MBB, IP, PPC32::XORIS, 2, HiTmp).addReg(HiLow).addImm(HiCst >> 16); + } + unsigned FinalTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::ORo, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp); + //BuildMI(*MBB, IP, PPC32::CMPLI, 2, PPC32::CR0).addReg(FinalTmp).addImm(0); + return OpNum; + } else { + // Emit a sequence of code which compares the high and low parts once + // each, then uses a conditional move to handle the overflow case. 
For + // example, a setlt for long would generate code like this: + // + // AL = lo(op1) < lo(op2) // Always unsigned comparison + // BL = hi(op1) < hi(op2) // Signedness depends on operands + // dest = hi(op1) == hi(op2) ? BL : AL; + // + + // FIXME: Not Yet Implemented + return OpNum; + } + } + } + + unsigned Op1r = getReg(Op1, MBB, IP); + switch (Class) { + default: assert(0 && "Unknown type class!"); + case cByte: + case cShort: + case cInt: + BuildMI(*MBB, IP, CompTy->isSigned() ? PPC32::CMP : PPC32::CMPL, 2, PPC32::CR0).addReg(Op0r).addReg(Op1r); + break; + case cFP: + emitUCOM(MBB, IP, Op0r, Op1r); + break; + + case cLong: + if (OpNum < 2) { // seteq, setne + unsigned LoTmp = makeAnotherReg(Type::IntTy); + unsigned HiTmp = makeAnotherReg(Type::IntTy); + unsigned FinalTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::XOR, 2, LoTmp).addReg(Op0r).addReg(Op1r); + BuildMI(*MBB, IP, PPC32::XOR, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1); + BuildMI(*MBB, IP, PPC32::ORo, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp); + //BuildMI(*MBB, IP, PPC32::CMPLI, 2, PPC32::CR0).addReg(FinalTmp).addImm(0); + break; // Allow the sete or setne to be generated from flags set by OR + } else { + // Emit a sequence of code which compares the high and low parts once + // each, then uses a conditional move to handle the overflow case. For + // example, a setlt for long would generate code like this: + // + // AL = lo(op1) < lo(op2) // Signedness depends on operands + // BL = hi(op1) < hi(op2) // Always unsigned comparison + // dest = hi(op1) == hi(op2) ? BL : AL; + // + + // FIXME: Not Yet Implemented + return OpNum; + } + } + return OpNum; +} + +/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized +/// register, then move it to wherever the result should be. +/// +void ISel::visitSetCondInst(SetCondInst &I) { + if (canFoldSetCCIntoBranchOrSelect(&I)) + return; // Fold this into a branch or select. + + unsigned DestReg = getReg(I); + MachineBasicBlock::iterator MII = BB->end(); + emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),DestReg); +} + +/// emitSetCCOperation - Common code shared between visitSetCondInst and +/// constant expression support. +/// +/// FIXME: this is wrong. we should figure out a way to guarantee +/// TargetReg is a CR and then make it a no-op +void ISel::emitSetCCOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, unsigned Opcode, + unsigned TargetReg) { + unsigned OpNum = getSetCCNumber(Opcode); + OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP); + + // The value is already in CR0 at this point, do nothing. +} + + +void ISel::visitSelectInst(SelectInst &SI) { + unsigned DestReg = getReg(SI); + MachineBasicBlock::iterator MII = BB->end(); + emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),SI.getFalseValue(), DestReg); +} + +/// emitSelect - Common code shared between visitSelectInst and the constant +/// expression support. +/// FIXME: this is most likely broken in one or more ways. Namely, PowerPC has +/// no select instruction. FSEL only works for comparisons against zero. 
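/// (Illustrative aside, not from the patch: the branch-free scheme used below
/// derives an all-ones mask from the condition being zero, roughly
///     falseAll = (Cond == 0) ? 0xFFFFFFFF : 0;   // via cntlzw/rlwinm/srawi
///     trueAll  = ~falseAll;
///     Dest     = (TrueVal & trueAll) | (FalseVal & falseAll);
/// so no conditional branch is needed at all.)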
+void ISel::emitSelectOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Cond, Value *TrueVal, Value *FalseVal, + unsigned DestReg) { + unsigned SelectClass = getClassB(TrueVal->getType()); + + unsigned TrueReg = getReg(TrueVal, MBB, IP); + unsigned FalseReg = getReg(FalseVal, MBB, IP); + + if (TrueReg == FalseReg) { + if (SelectClass == cFP) { + BuildMI(*MBB, IP, PPC32::FMR, 1, DestReg).addReg(TrueReg); + } else { + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(TrueReg).addReg(TrueReg); + } + + if (SelectClass == cLong) + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(TrueReg+1).addReg(TrueReg+1); + return; + } + + unsigned CondReg = getReg(Cond, MBB, IP); + unsigned numZeros = makeAnotherReg(Type::IntTy); + unsigned falseHi = makeAnotherReg(Type::IntTy); + unsigned falseAll = makeAnotherReg(Type::IntTy); + unsigned trueAll = makeAnotherReg(Type::IntTy); + unsigned Temp1 = makeAnotherReg(Type::IntTy); + unsigned Temp2 = makeAnotherReg(Type::IntTy); + + BuildMI(*MBB, IP, PPC32::CNTLZW, 1, numZeros).addReg(CondReg); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, falseHi).addReg(numZeros).addImm(26).addImm(0).addImm(0); + BuildMI(*MBB, IP, PPC32::SRAWI, 2, falseAll).addReg(falseHi).addImm(31); + BuildMI(*MBB, IP, PPC32::NOR, 2, trueAll).addReg(falseAll).addReg(falseAll); + BuildMI(*MBB, IP, PPC32::AND, 2, Temp1).addReg(TrueReg).addReg(trueAll); + BuildMI(*MBB, IP, PPC32::AND, 2, Temp2).addReg(FalseReg).addReg(falseAll); + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(Temp1).addReg(Temp2); + + if (SelectClass == cLong) { + unsigned Temp3 = makeAnotherReg(Type::IntTy); + unsigned Temp4 = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::AND, 2, Temp3).addReg(TrueReg+1).addReg(trueAll); + BuildMI(*MBB, IP, PPC32::AND, 2, Temp4).addReg(FalseReg+1).addReg(falseAll); + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(Temp3).addReg(Temp4); + } + + return; +} + + + +/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide +/// operand, in the specified target register. +/// +void ISel::promote32(unsigned targetReg, const ValueRecord &VR) { + bool isUnsigned = VR.Ty->isUnsigned() || VR.Ty == Type::BoolTy; + + Value *Val = VR.Val; + const Type *Ty = VR.Ty; + if (Val) { + if (Constant *C = dyn_cast(Val)) { + Val = ConstantExpr::getCast(C, Type::IntTy); + Ty = Type::IntTy; + } + + // If this is a simple constant, just emit a load directly to avoid the copy. + if (ConstantInt *CI = dyn_cast(Val)) { + int TheVal = CI->getRawValue() & 0xFFFFFFFF; + + if (TheVal < 32768 && TheVal >= -32768) { + BuildMI(BB, PPC32::ADDI, 2, targetReg).addReg(PPC32::R0).addImm(TheVal); + } else { + unsigned TmpReg = makeAnotherReg(Type::IntTy); + BuildMI(BB, PPC32::ADDIS, 2, TmpReg).addReg(PPC32::R0).addImm(TheVal >> 16); + BuildMI(BB, PPC32::ORI, 2, targetReg).addReg(TmpReg).addImm(TheVal & 0xFFFF); + } + return; + } + } + + // Make sure we have the register number for this value... + unsigned Reg = Val ? 
getReg(Val) : VR.Reg; + + switch (getClassB(Ty)) { + case cByte: + // Extend value into target register (8->32) + if (isUnsigned) + BuildMI(BB, PPC32::RLWINM, 4, targetReg).addReg(Reg).addZImm(0).addZImm(24).addZImm(31); + else + BuildMI(BB, PPC32::EXTSB, 1, targetReg).addReg(Reg); + break; + case cShort: + // Extend value into target register (16->32) + if (isUnsigned) + BuildMI(BB, PPC32::RLWINM, 4, targetReg).addReg(Reg).addZImm(0).addZImm(16).addZImm(31); + else + BuildMI(BB, PPC32::EXTSH, 1, targetReg).addReg(Reg); + break; + case cInt: + // Move value into target register (32->32) + BuildMI(BB, PPC32::ORI, 2, targetReg).addReg(Reg).addReg(Reg); + break; + default: + assert(0 && "Unpromotable operand class in promote32"); + } +} + +// just emit blr. +void ISel::visitReturnInst(ReturnInst &I) { + Value *RetVal = I.getOperand(0); + + switch (getClassB(RetVal->getType())) { + case cByte: // integral return values: extend or move into r3 and return + case cShort: + case cInt: + promote32(PPC32::R3, ValueRecord(RetVal)); + break; + case cFP: { // Floats & Doubles: Return in f1 + unsigned RetReg = getReg(RetVal); + BuildMI(BB, PPC32::FMR, 1, PPC32::F1).addReg(RetReg); + break; + } + case cLong: { + unsigned RetReg = getReg(RetVal); + BuildMI(BB, PPC32::OR, 2, PPC32::R3).addReg(RetReg).addReg(RetReg); + BuildMI(BB, PPC32::OR, 2, PPC32::R4).addReg(RetReg+1).addReg(RetReg+1); + break; + } + default: + visitInstruction(I); + } + BuildMI(BB, PPC32::BLR, 1).addImm(0); +} + +// getBlockAfter - Return the basic block which occurs lexically after the +// specified one. +static inline BasicBlock *getBlockAfter(BasicBlock *BB) { + Function::iterator I = BB; ++I; // Get iterator to next block + return I != BB->getParent()->end() ? &*I : 0; +} + +/// visitBranchInst - Handle conditional and unconditional branches here. Note +/// that since code layout is frozen at this point, that if we are trying to +/// jump to a block that is the immediate successor of the current block, we can +/// just make a fall-through (but we don't currently). +/// +void ISel::visitBranchInst(BranchInst &BI) { + // Update machine-CFG edges + BB->addSuccessor (MBBMap[BI.getSuccessor(0)]); + if (BI.isConditional()) + BB->addSuccessor (MBBMap[BI.getSuccessor(1)]); + + BasicBlock *NextBB = getBlockAfter(BI.getParent()); // BB after current one + + if (!BI.isConditional()) { // Unconditional branch? + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, PPC32::B, 1).addMBB(MBBMap[BI.getSuccessor(0)]); + return; + } + + // See if we can fold the setcc into the branch itself... + SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(BI.getCondition()); + if (SCI == 0) { + // Nope, cannot fold setcc into this branch. Emit a branch on a condition + // computed some other way... 
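+ // The condition is just a boolean value living in a register, so compare
+ // it against zero and branch on the EQ bit of CR0.  In the BC forms below,
+ // BI=2 names CR0's EQ bit; BO=4 branches when that bit is clear (the
+ // condition was true) and BO=12 branches when it is set (the condition
+ // was false).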
+ unsigned condReg = getReg(BI.getCondition()); + BuildMI(BB, PPC32::CMPLI, 3, PPC32::CR0).addImm(0).addReg(condReg).addImm(0); + if (BI.getSuccessor(1) == NextBB) { + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, PPC32::BC, 3).addImm(4).addImm(2).addMBB(MBBMap[BI.getSuccessor(0)]); + } else { + BuildMI(BB, PPC32::BC, 3).addImm(12).addImm(2).addMBB(MBBMap[BI.getSuccessor(1)]); + + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, PPC32::B, 1).addMBB(MBBMap[BI.getSuccessor(0)]); + } + return; + } + + + unsigned OpNum = getSetCCNumber(SCI->getOpcode()); + MachineBasicBlock::iterator MII = BB->end(); + OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,MII); + + const Type *CompTy = SCI->getOperand(0)->getType(); + bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP; + + // LLVM -> X86 signed X86 unsigned + // ----- ---------- ------------ + // seteq -> je je + // setne -> jne jne + // setlt -> jl jb + // setge -> jge jae + // setgt -> jg ja + // setle -> jle jbe + + static const unsigned BITab[6] = { 2, 2, 0, 0, 1, 1 }; + unsigned BO_true = (OpNum % 2 == 0) ? 12 : 4; + unsigned BO_false = (OpNum % 2 == 0) ? 4 : 12; + unsigned BIval = BITab[0]; + + if (BI.getSuccessor(0) != NextBB) { + BuildMI(BB, PPC32::BC, 3).addImm(BO_true).addImm(BIval).addMBB(MBBMap[BI.getSuccessor(0)]); + if (BI.getSuccessor(1) != NextBB) + BuildMI(BB, PPC32::B, 1).addMBB(MBBMap[BI.getSuccessor(1)]); + } else { + // Change to the inverse condition... + if (BI.getSuccessor(1) != NextBB) { + BuildMI(BB, PPC32::BC, 3).addImm(BO_false).addImm(BIval).addMBB(MBBMap[BI.getSuccessor(1)]); + } + } +} + + +/// doCall - This emits an abstract call instruction, setting up the arguments +/// and the return value as appropriate. For the actual function call itself, +/// it inserts the specified CallMI instruction into the stream. +/// +/// FIXME: See Documentation at the following URL for "correct" behavior +/// +void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI, + const std::vector &Args) { + // Count how many bytes are to be pushed on the stack... + unsigned NumBytes = 0; + + if (!Args.empty()) { + for (unsigned i = 0, e = Args.size(); i != e; ++i) + switch (getClassB(Args[i].Ty)) { + case cByte: case cShort: case cInt: + NumBytes += 4; break; + case cLong: + NumBytes += 8; break; + case cFP: + NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8; + break; + default: assert(0 && "Unknown class!"); + } + + // Adjust the stack pointer for the new arguments... + BuildMI(BB, PPC32::ADJCALLSTACKDOWN, 1).addImm(NumBytes); + + // Arguments go on the stack in reverse order, as specified by the ABI. + unsigned ArgOffset = 0; + unsigned GPR_remaining = 8; + unsigned FPR_remaining = 13; + unsigned GPR_idx = 3; + unsigned FPR_idx = 1; + + for (unsigned i = 0, e = Args.size(); i != e; ++i) { + unsigned ArgReg; + switch (getClassB(Args[i].Ty)) { + case cByte: + case cShort: + // Promote arg to 32 bits wide into a temporary register... + ArgReg = makeAnotherReg(Type::UIntTy); + promote32(ArgReg, Args[i]); + + // Reg or stack? + if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, PPC32::R0 + GPR_idx).addReg(ArgReg).addReg(ArgReg); + } else { + BuildMI(BB, PPC32::STW, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + } + break; + case cInt: + ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg; + + // Reg or stack? 
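+ // Under the calling convention modeled here, the first eight integer
+ // arguments travel in GPRs starting at R3 and the first thirteen
+ // floating-point arguments in FPRs starting at F1; anything beyond that
+ // is stored into the argument area addressed off R1.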
+ if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, PPC32::R0 + GPR_idx).addReg(ArgReg).addReg(ArgReg); + } else { + BuildMI(BB, PPC32::STW, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + } + break; + case cLong: + ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg; + + // Reg or stack? + if (GPR_remaining > 1) { + BuildMI(BB, PPC32::OR, 2, PPC32::R0 + GPR_idx).addReg(ArgReg).addReg(ArgReg); + BuildMI(BB, PPC32::OR, 2, PPC32::R0 + GPR_idx + 1).addReg(ArgReg+1).addReg(ArgReg+1); + } else { + BuildMI(BB, PPC32::STW, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + BuildMI(BB, PPC32::STW, 3).addReg(ArgReg+1).addImm(ArgOffset+4).addReg(PPC32::R1); + } + + ArgOffset += 4; // 8 byte entry, not 4. + if (GPR_remaining > 0) { + GPR_remaining -= 1; // uses up 2 GPRs + GPR_idx += 1; + } + break; + case cFP: + ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg; + if (Args[i].Ty == Type::FloatTy) { + // Reg or stack? + if (FPR_remaining > 0) { + BuildMI(BB, PPC32::FMR, 1, PPC32::F0 + FPR_idx).addReg(ArgReg); + FPR_remaining--; + FPR_idx++; + } else { + BuildMI(BB, PPC32::STFS, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + } + } else { + assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!"); + // Reg or stack? + if (FPR_remaining > 0) { + BuildMI(BB, PPC32::FMR, 1, PPC32::F0 + FPR_idx).addReg(ArgReg); + FPR_remaining--; + FPR_idx++; + } else { + BuildMI(BB, PPC32::STFD, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + } + + ArgOffset += 4; // 8 byte entry, not 4. + if (GPR_remaining > 0) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + } + break; + + default: assert(0 && "Unknown class!"); + } + ArgOffset += 4; + if (GPR_remaining > 0) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + } + } else { + BuildMI(BB, PPC32::ADJCALLSTACKDOWN, 1).addImm(0); + } + + BB->push_back(CallMI); + + BuildMI(BB, PPC32::ADJCALLSTACKUP, 1).addImm(NumBytes); + + // If there is a return value, scavenge the result from the location the call + // leaves it in... + // + if (Ret.Ty != Type::VoidTy) { + unsigned DestClass = getClassB(Ret.Ty); + switch (DestClass) { + case cByte: + case cShort: + case cInt: + // Integral results are in r3 + BuildMI(BB, PPC32::OR, 2, Ret.Reg).addReg(PPC32::R3).addReg(PPC32::R3); + case cFP: // Floating-point return values live in f1 + BuildMI(BB, PPC32::FMR, 1, Ret.Reg).addReg(PPC32::F1); + break; + case cLong: // Long values are in r3:r4 + BuildMI(BB, PPC32::OR, 2, Ret.Reg).addReg(PPC32::R3).addReg(PPC32::R3); + BuildMI(BB, PPC32::OR, 2, Ret.Reg+1).addReg(PPC32::R4).addReg(PPC32::R4); + break; + default: assert(0 && "Unknown class!"); + } + } +} + + +/// visitCallInst - Push args on stack and do a procedure call instruction. +void ISel::visitCallInst(CallInst &CI) { + MachineInstr *TheCall; + if (Function *F = CI.getCalledFunction()) { + // Is it an intrinsic function call? + if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) { + visitIntrinsicCall(ID, CI); // Special intrinsics are not handled here + return; + } + + // Emit a CALL instruction with PC-relative displacement. 
+ TheCall = BuildMI(PPC32::CALLpcrel, 1).addGlobalAddress(F, true); + } else { // Emit an indirect call through the CTR + unsigned Reg = getReg(CI.getCalledValue()); + BuildMI(PPC32::MTSPR, 2).addZImm(9).addReg(Reg); + TheCall = BuildMI(PPC32::CALLindirect, 1).addZImm(20).addZImm(0); + } + + std::vector Args; + for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i) + Args.push_back(ValueRecord(CI.getOperand(i))); + + unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0; + doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args); +} + + +/// dyncastIsNan - Return the operand of an isnan operation if this is an isnan. +/// +static Value *dyncastIsNan(Value *V) { + if (CallInst *CI = dyn_cast(V)) + if (Function *F = CI->getCalledFunction()) + if (F->getIntrinsicID() == Intrinsic::isnan) + return CI->getOperand(1); + return 0; +} + +/// isOnlyUsedByUnorderedComparisons - Return true if this value is only used by +/// or's whos operands are all calls to the isnan predicate. +static bool isOnlyUsedByUnorderedComparisons(Value *V) { + assert(dyncastIsNan(V) && "The value isn't an isnan call!"); + + // Check all uses, which will be or's of isnans if this predicate is true. + for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){ + Instruction *I = cast(*UI); + if (I->getOpcode() != Instruction::Or) return false; + if (I->getOperand(0) != V && !dyncastIsNan(I->getOperand(0))) return false; + if (I->getOperand(1) != V && !dyncastIsNan(I->getOperand(1))) return false; + } + + return true; +} + +/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the +/// function, lowering any calls to unknown intrinsic functions into the +/// equivalent LLVM code. +/// +void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) { + for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) + for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) + if (CallInst *CI = dyn_cast(I++)) + if (Function *F = CI->getCalledFunction()) + switch (F->getIntrinsicID()) { + case Intrinsic::not_intrinsic: + case Intrinsic::vastart: + case Intrinsic::vacopy: + case Intrinsic::vaend: + case Intrinsic::returnaddress: + case Intrinsic::frameaddress: + case Intrinsic::isnan: + // We directly implement these intrinsics + break; + case Intrinsic::readio: { + // On PPC, memory operations are in-order. Lower this intrinsic + // into a volatile load. + Instruction *Before = CI->getPrev(); + LoadInst * LI = new LoadInst(CI->getOperand(1), "", true, CI); + CI->replaceAllUsesWith(LI); + BB->getInstList().erase(CI); + break; + } + case Intrinsic::writeio: { + // On PPC, memory operations are in-order. Lower this intrinsic + // into a volatile store. + Instruction *Before = CI->getPrev(); + StoreInst *LI = new StoreInst(CI->getOperand(1), + CI->getOperand(2), true, CI); + CI->replaceAllUsesWith(LI); + BB->getInstList().erase(CI); + break; + } + default: + // All other intrinsic calls we must lower. + Instruction *Before = CI->getPrev(); + TM.getIntrinsicLowering().LowerIntrinsicCall(CI); + if (Before) { // Move iterator to instruction after call + I = Before; ++I; + } else { + I = BB->begin(); + } + } +} + +void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) { + unsigned TmpReg1, TmpReg2, TmpReg3; + switch (ID) { + case Intrinsic::vastart: + // Get the address of the first vararg value... 
+ TmpReg1 = getReg(CI); + addFrameReference(BuildMI(BB, PPC32::ADDI, 2, TmpReg1), VarArgsFrameIndex); + return; + + case Intrinsic::vacopy: + TmpReg1 = getReg(CI); + TmpReg2 = getReg(CI.getOperand(1)); + BuildMI(BB, PPC32::OR, 2, TmpReg1).addReg(TmpReg2).addReg(TmpReg2); + return; + case Intrinsic::vaend: return; + + case Intrinsic::returnaddress: + case Intrinsic::frameaddress: + TmpReg1 = getReg(CI); + if (cast(CI.getOperand(1))->isNullValue()) { + if (ID == Intrinsic::returnaddress) { + // Just load the return address + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, TmpReg1), + ReturnAddressIndex); + } else { + addFrameReference(BuildMI(BB, PPC32::ADDI, 2, TmpReg1), + ReturnAddressIndex, -4, false); + } + } else { + // Values other than zero are not implemented yet. + BuildMI(BB, PPC32::ADDI, 2, TmpReg1).addReg(PPC32::R0).addImm(0); + } + return; + + case Intrinsic::isnan: + // If this is only used by 'isunordered' style comparisons, don't emit it. + if (isOnlyUsedByUnorderedComparisons(&CI)) return; + TmpReg1 = getReg(CI.getOperand(1)); + emitUCOM(BB, BB->end(), TmpReg1, TmpReg1); + TmpReg2 = makeAnotherReg(Type::IntTy); + BuildMI(BB, PPC32::MFCR, TmpReg2); + TmpReg3 = getReg(CI); + BuildMI(BB, PPC32::RLWINM, 4, TmpReg3).addReg(TmpReg2).addImm(4).addImm(31).addImm(31); + return; + + default: assert(0 && "Error: unknown intrinsics should have been lowered!"); + } +} + +/// visitSimpleBinary - Implement simple binary operators for integral types... +/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for +/// Xor. +/// +void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) { + unsigned DestReg = getReg(B); + MachineBasicBlock::iterator MI = BB->end(); + Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1); + unsigned Class = getClassB(B.getType()); + + emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg); +} + +/// emitBinaryFPOperation - This method handles emission of floating point +/// Add (0), Sub (1), Mul (2), and Div (3) operations. +void ISel::emitBinaryFPOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, + unsigned OperatorClass, unsigned DestReg) { + + // Special case: op Reg, + if (ConstantFP *Op1C = dyn_cast(Op1)) { + // Create a constant pool entry for this constant. + MachineConstantPool *CP = F->getConstantPool(); + unsigned CPI = CP->getConstantPoolIndex(Op1C); + const Type *Ty = Op1->getType(); + + static const unsigned OpcodeTab[][4] = { + { PPC32::FADDS, PPC32::FSUBS, PPC32::FMULS, PPC32::FDIVS }, // Float + { PPC32::FADD, PPC32::FSUB, PPC32::FMUL, PPC32::FDIV }, // Double + }; + + assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!"); + unsigned TempReg = makeAnotherReg(Ty); + unsigned LoadOpcode = Ty == Type::FloatTy ? PPC32::LFS : PPC32::LFD; + addConstantPoolReference(BuildMI(*BB, IP, LoadOpcode, 2, TempReg), CPI); + + unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass]; + unsigned Op0r = getReg(Op0, BB, IP); + BuildMI(*BB, IP, Opcode, DestReg).addReg(Op0r).addReg(TempReg); + return; + } + + // Special case: R1 = op , R2 + if (ConstantFP *CFP = dyn_cast(Op0)) + if (CFP->isExactlyValue(-0.0) && OperatorClass == 1) { + // -0.0 - X === -X + unsigned op1Reg = getReg(Op1, BB, IP); + BuildMI(*BB, IP, PPC32::FNEG, 1, DestReg).addReg(op1Reg); + return; + } else { + // R1 = op CST, R2 --> R1 = opr R2, CST + + // Create a constant pool entry for this constant. 
+ MachineConstantPool *CP = F->getConstantPool(); + unsigned CPI = CP->getConstantPoolIndex(CFP); + const Type *Ty = CFP->getType(); + + static const unsigned OpcodeTab[][4] = { + { PPC32::FADDS, PPC32::FSUBS, PPC32::FMULS, PPC32::FDIVS }, // Float + { PPC32::FADD, PPC32::FSUB, PPC32::FMUL, PPC32::FDIV }, // Double + }; + + assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!"); + unsigned TempReg = makeAnotherReg(Ty); + unsigned LoadOpcode = Ty == Type::FloatTy ? PPC32::LFS : PPC32::LFD; + addConstantPoolReference(BuildMI(*BB, IP, LoadOpcode, 2, TempReg), CPI); + + unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass]; + unsigned Op1r = getReg(Op1, BB, IP); + BuildMI(*BB, IP, Opcode, DestReg).addReg(TempReg).addReg(Op1r); + return; + } + + // General case. + static const unsigned OpcodeTab[4] = { + PPC32::FADD, PPC32::FSUB, PPC32::FMUL, PPC32::FDIV + }; + + unsigned Opcode = OpcodeTab[OperatorClass]; + unsigned Op0r = getReg(Op0, BB, IP); + unsigned Op1r = getReg(Op1, BB, IP); + BuildMI(*BB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r); +} + +/// emitSimpleBinaryOperation - Implement simple binary operators for integral +/// types... OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for +/// Or, 4 for Xor. +/// +/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary +/// and constant expression support. +/// +void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, + unsigned OperatorClass, unsigned DestReg) { + unsigned Class = getClassB(Op0->getType()); + + // Arithmetic and Bitwise operators + static const unsigned OpcodeTab[5] = { + PPC32::ADD, PPC32::SUB, PPC32::AND, PPC32::OR, PPC32::XOR + }; + // Otherwise, code generate the full operation with a constant. + static const unsigned BottomTab[] = { + PPC32::ADDC, PPC32::SUBC, PPC32::AND, PPC32::OR, PPC32::XOR + }; + static const unsigned TopTab[] = { + PPC32::ADDE, PPC32::SUBFE, PPC32::AND, PPC32::OR, PPC32::XOR + }; + + if (Class == cFP) { + assert(OperatorClass < 2 && "No logical ops for FP!"); + emitBinaryFPOperation(MBB, IP, Op0, Op1, OperatorClass, DestReg); + return; + } + + if (Op0->getType() == Type::BoolTy) { + if (OperatorClass == 3) + // If this is an or of two isnan's, emit an FP comparison directly instead + // of or'ing two isnan's together. 
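+ // The fcmpu emitted below sets the unordered (UN) bit of CR0 whenever
+ // either operand is a NaN; mfcr then copies the condition register into a
+ // GPR and the rlwinm rotates CR0's UN bit down into the low-order bit,
+ // producing a clean 0/1 result with no branches.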
+ if (Value *LHS = dyncastIsNan(Op0)) + if (Value *RHS = dyncastIsNan(Op1)) { + unsigned Op0Reg = getReg(RHS, MBB, IP), Op1Reg = getReg(LHS, MBB, IP); + unsigned TmpReg = makeAnotherReg(Type::IntTy); + emitUCOM(MBB, IP, Op0Reg, Op1Reg); + BuildMI(*MBB, IP, PPC32::MFCR, TmpReg); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(TmpReg).addImm(4).addImm(31).addImm(31); + return; + } + } + + // sub 0, X -> neg X + if (ConstantInt *CI = dyn_cast(Op0)) + if (OperatorClass == 1 && CI->isNullValue()) { + unsigned op1Reg = getReg(Op1, MBB, IP); + BuildMI(*MBB, IP, PPC32::NEG, 1, DestReg).addReg(op1Reg); + + if (Class == cLong) { + unsigned zeroes = makeAnotherReg(Type::IntTy); + unsigned overflow = makeAnotherReg(Type::IntTy); + unsigned T = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::CNTLZW, 1, zeroes).addReg(op1Reg); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, overflow).addReg(zeroes).addImm(27).addImm(5).addImm(31); + BuildMI(*MBB, IP, PPC32::ADD, 2, T).addReg(op1Reg+1).addReg(overflow); + BuildMI(*MBB, IP, PPC32::NEG, 1, DestReg+1).addReg(T); + } + return; + } + + // Special case: op Reg, + if (ConstantInt *Op1C = dyn_cast(Op1)) { + unsigned Op0r = getReg(Op0, MBB, IP); + + // xor X, -1 -> not X + if (OperatorClass == 4 && Op1C->isAllOnesValue()) { + BuildMI(*MBB, IP, PPC32::NOR, 2, DestReg).addReg(Op0r).addReg(Op0r); + if (Class == cLong) // Invert the top part too + BuildMI(*MBB, IP, PPC32::NOR, 2, DestReg+1).addReg(Op0r+1).addReg(Op0r+1); + return; + } + + unsigned Opcode = OpcodeTab[OperatorClass]; + unsigned Op1r = getReg(Op1, MBB, IP); + + if (Class != cLong) { + BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r); + return; + } + + // If the constant is zero in the low 32-bits, just copy the low part + // across and apply the normal 32-bit operation to the high parts. There + // will be no carry or borrow into the top. + if (cast(Op1C)->getRawValue() == 0) { + if (OperatorClass != 2) // All but and... + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(Op0r).addReg(Op0r); + else + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg).addReg(PPC32::R0).addImm(0); + BuildMI(*MBB, IP, Opcode, 2, DestReg+1).addReg(Op0r+1).addReg(Op1r+1); + return; + } + + // If this is a long value and the high or low bits have a special + // property, emit some special cases. + unsigned Op1h = cast(Op1C)->getRawValue() >> 32LL; + + // If this is a logical operation and the top 32-bits are zero, just + // operate on the lower 32. + if (Op1h == 0 && OperatorClass > 1) { + BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r); + if (OperatorClass != 2) // All but and + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(Op0r+1).addReg(Op0r+1); + else + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg+1).addReg(PPC32::R0).addImm(0); + return; + } + + // TODO: We could handle lots of other special cases here, such as AND'ing + // with 0xFFFFFFFF00000000 -> noop, etc. 
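+ // The remaining 64-bit cases are handled pairwise: the low words use the
+ // carrying opcode from BottomTab (ADDC/SUBC) and the high words the
+ // extended opcode from TopTab (ADDE/SUBFE), so a carry or borrow produced
+ // by the low half is consumed by the high half.  For the logical
+ // operators the halves are independent and the plain opcode is reused.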
+ + BuildMI(*MBB, IP, BottomTab[OperatorClass], 2, DestReg).addReg(Op0r).addImm(Op1r); + BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1).addReg(Op0r+1).addImm(Op1r+1); + return; + } + + unsigned Op0r = getReg(Op0, MBB, IP); + unsigned Op1r = getReg(Op1, MBB, IP); + + if (Class != cLong) { + unsigned Opcode = OpcodeTab[OperatorClass]; + BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r); + } else { + BuildMI(*MBB, IP, BottomTab[OperatorClass], 2, DestReg).addReg(Op0r).addImm(Op1r); + BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1).addReg(Op0r+1).addImm(Op1r+1); + } + return; +} + +/// doMultiply - Emit appropriate instructions to multiply together the +/// registers op0Reg and op1Reg, and put the result in DestReg. The type of the +/// result should be given as DestTy. +/// +void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI, + unsigned DestReg, const Type *DestTy, + unsigned op0Reg, unsigned op1Reg) { + unsigned Class = getClass(DestTy); + switch (Class) { + case cLong: + BuildMI(*MBB, MBBI, PPC32::MULHW, 2, DestReg+1).addReg(op0Reg+1).addReg(op1Reg+1); + case cInt: + case cShort: + case cByte: + BuildMI(*MBB, MBBI, PPC32::MULLW, 2, DestReg).addReg(op0Reg).addReg(op1Reg); + return; + default: + assert(0 && "doMultiply cannot operate on unknown type!"); + } +} + +// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It +// returns zero when the input is not exactly a power of two. +static unsigned ExactLog2(unsigned Val) { + if (Val == 0 || (Val & (Val-1))) return 0; + unsigned Count = 0; + while (Val != 1) { + Val >>= 1; + ++Count; + } + return Count+1; +} + + +/// doMultiplyConst - This function is specialized to efficiently codegen an 8, +/// 16, or 32-bit integer multiply by a constant. +void ISel::doMultiplyConst(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + unsigned DestReg, const Type *DestTy, + unsigned op0Reg, unsigned ConstRHS) { + unsigned Class = getClass(DestTy); + // Handle special cases here. + switch (ConstRHS) { + case 0: + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg).addReg(PPC32::R0).addImm(0); + return; + case 1: + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(op0Reg).addReg(op0Reg); + return; + case 2: + BuildMI(*MBB, IP, PPC32::ADD, 2,DestReg).addReg(op0Reg).addReg(op0Reg); + return; + } + + // If the element size is exactly a power of 2, use a shift to get it. + if (unsigned Shift = ExactLog2(ConstRHS)) { + switch (Class) { + default: assert(0 && "Unknown class for this function!"); + case cByte: + case cShort: + case cInt: + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(op0Reg).addImm(Shift-1).addImm(0).addImm(31-Shift-1); + return; + } + } + + // Most general case, emit a normal multiply... + unsigned TmpReg1 = makeAnotherReg(Type::IntTy); + unsigned TmpReg2 = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::ADDIS, 2, TmpReg1).addReg(PPC32::R0).addImm(ConstRHS >> 16); + BuildMI(*MBB, IP, PPC32::ORI, 2, TmpReg2).addReg(TmpReg1).addImm(ConstRHS); + + // Emit a MUL to multiply the register holding the index by + // elementSize, putting the result in OffsetReg. 
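+ // (The ADDIS/ORI pair above simply materializes ConstRHS in a register,
+ // since it matched none of the cheap add or shift special cases; from
+ // here on this is an ordinary register-register multiply.)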
+ doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg2); +} + +void ISel::visitMul(BinaryOperator &I) { + unsigned ResultReg = getReg(I); + + Value *Op0 = I.getOperand(0); + Value *Op1 = I.getOperand(1); + + MachineBasicBlock::iterator IP = BB->end(); + emitMultiply(BB, IP, Op0, Op1, ResultReg); +} + +void ISel::emitMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, unsigned DestReg) { + MachineBasicBlock &BB = *MBB; + TypeClass Class = getClass(Op0->getType()); + + // Simple scalar multiply? + unsigned Op0Reg = getReg(Op0, &BB, IP); + switch (Class) { + case cByte: + case cShort: + case cInt: + if (ConstantInt *CI = dyn_cast(Op1)) { + unsigned Val = (unsigned)CI->getRawValue(); // Isn't a 64-bit constant + doMultiplyConst(&BB, IP, DestReg, Op0->getType(), Op0Reg, Val); + } else { + unsigned Op1Reg = getReg(Op1, &BB, IP); + doMultiply(&BB, IP, DestReg, Op1->getType(), Op0Reg, Op1Reg); + } + return; + case cFP: + emitBinaryFPOperation(MBB, IP, Op0, Op1, 2, DestReg); + return; + case cLong: + break; + } + + // Long value. We have to do things the hard way... + if (ConstantInt *CI = dyn_cast(Op1)) { + unsigned CLow = CI->getRawValue(); + unsigned CHi = CI->getRawValue() >> 32; + + if (CLow == 0) { + // If the low part of the constant is all zeros, things are simple. + BuildMI(BB, IP, PPC32::ADDI, 2, DestReg).addReg(PPC32::R0).addImm(0); + doMultiplyConst(&BB, IP, DestReg+1, Type::UIntTy, Op0Reg, CHi); + return; + } + + // Multiply the two low parts + unsigned OverflowReg = 0; + if (CLow == 1) { + BuildMI(BB, IP, PPC32::OR, 2, DestReg).addReg(Op0Reg).addReg(Op0Reg); + } else { + unsigned TmpRegL = makeAnotherReg(Type::UIntTy); + unsigned Op1RegL = makeAnotherReg(Type::UIntTy); + OverflowReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, IP, PPC32::ADDIS, 2, TmpRegL).addReg(PPC32::R0).addImm(CLow >> 16); + BuildMI(BB, IP, PPC32::ORI, 2, Op1RegL).addReg(TmpRegL).addImm(CLow); + BuildMI(BB, IP, PPC32::MULLW, 2, DestReg).addReg(Op0Reg).addReg(Op1RegL); + BuildMI(BB, IP, PPC32::MULHW, 2, OverflowReg).addReg(Op0Reg).addReg(Op1RegL); + } + + unsigned AHBLReg = makeAnotherReg(Type::UIntTy); + doMultiplyConst(&BB, IP, AHBLReg, Type::UIntTy, Op0Reg+1, CLow); + + unsigned AHBLplusOverflowReg; + if (OverflowReg) { + AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, IP, PPC32::ADD, 2, // AH*BL+(AL*BL >> 32) + AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg); + } else { + AHBLplusOverflowReg = AHBLReg; + } + + if (CHi == 0) { + BuildMI(BB, IP, PPC32::OR, 2, DestReg+1).addReg(AHBLplusOverflowReg).addReg(AHBLplusOverflowReg); + } else { + unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH + doMultiplyConst(&BB, IP, ALBHReg, Type::UIntTy, Op0Reg, CHi); + + BuildMI(BB, IP, PPC32::ADD, 2, // AL*BH + AH*BL + (AL*BL >> 32) + DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg); + } + return; + } + + // General 64x64 multiply + + unsigned Op1Reg = getReg(Op1, &BB, IP); + + // Multiply the two low parts... 
capturing carry into EDX + BuildMI(BB, IP, PPC32::MULLW, 2, DestReg).addReg(Op0Reg).addReg(Op1Reg); // AL*BL + + unsigned OverflowReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, IP, PPC32::MULHW, 2, OverflowReg).addReg(Op0Reg).addReg(Op1Reg); // AL*BL >> 32 + + unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL + BuildMI(BB, IP, PPC32::MULLW, 2, AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg); + + unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, IP, PPC32::ADD, 2, // AH*BL+(AL*BL >> 32) + AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg); + + unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH + BuildMI(BB, IP, PPC32::MULLW, 2, ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1); + + BuildMI(BB, IP, PPC32::ADD, 2, // AL*BH + AH*BL + (AL*BL >> 32) + DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg); +} + + +/// visitDivRem - Handle division and remainder instructions... these +/// instruction both require the same instructions to be generated, they just +/// select the result from a different register. Note that both of these +/// instructions work differently for signed and unsigned operands. +/// +void ISel::visitDivRem(BinaryOperator &I) { + unsigned ResultReg = getReg(I); + Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); + + MachineBasicBlock::iterator IP = BB->end(); + emitDivRemOperation(BB, IP, Op0, Op1, I.getOpcode() == Instruction::Div, ResultReg); +} + +void ISel::emitDivRemOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, bool isDiv, + unsigned ResultReg) { + const Type *Ty = Op0->getType(); + unsigned Class = getClass(Ty); + switch (Class) { + case cFP: // Floating point divide + if (isDiv) { + emitBinaryFPOperation(BB, IP, Op0, Op1, 3, ResultReg); + return; + } else { // Floating point remainder... + unsigned Op0Reg = getReg(Op0, BB, IP); + unsigned Op1Reg = getReg(Op1, BB, IP); + MachineInstr *TheCall = + BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("fmod", true); + std::vector Args; + Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy)); + Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy)); + doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args); + } + return; + case cLong: { + static const char *FnName[] = + { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" }; + unsigned Op0Reg = getReg(Op0, BB, IP); + unsigned Op1Reg = getReg(Op1, BB, IP); + unsigned NameIdx = Ty->isUnsigned()*2 + isDiv; + MachineInstr *TheCall = + BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol(FnName[NameIdx], true); + + std::vector Args; + Args.push_back(ValueRecord(Op0Reg, Type::LongTy)); + Args.push_back(ValueRecord(Op1Reg, Type::LongTy)); + doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args); + return; + } + case cByte: case cShort: case cInt: + break; // Small integrals, handled below... + default: assert(0 && "Unknown class!"); + } + + // Special case signed division by power of 2. + if (isDiv) + if (ConstantSInt *CI = dyn_cast(Op1)) { + assert(Class != cLong && "This doesn't handle 64-bit divides!"); + int V = CI->getValue(); + + if (V == 1) { // X /s 1 => X + unsigned Op0Reg = getReg(Op0, BB, IP); + BuildMI(*BB, IP, PPC32::OR, 2, ResultReg).addReg(Op0Reg).addReg(Op0Reg); + return; + } + + if (V == -1) { // X /s -1 => -X + unsigned Op0Reg = getReg(Op0, BB, IP); + BuildMI(*BB, IP, PPC32::NEG, 1, ResultReg).addReg(Op0Reg); + return; + } + + bool isNeg = false; + if (V < 0) { // Not a positive power of 2? + V = -V; + isNeg = true; // Maybe it's a negative power of 2. 
+ } + if (unsigned Log = ExactLog2(V)) { + --Log; + unsigned Op0Reg = getReg(Op0, BB, IP); + unsigned TmpReg = makeAnotherReg(Op0->getType()); + if (Log != 1) + BuildMI(*BB, IP, PPC32::SRAWI, 2, TmpReg).addReg(Op0Reg).addImm(Log-1); + else + BuildMI(*BB, IP, PPC32::OR, 2, TmpReg).addReg(Op0Reg).addReg(Op0Reg); + + unsigned TmpReg2 = makeAnotherReg(Op0->getType()); + BuildMI(*BB, IP, PPC32::RLWINM, 4, TmpReg2).addReg(TmpReg).addImm(Log).addImm(32-Log).addImm(31); + + unsigned TmpReg3 = makeAnotherReg(Op0->getType()); + BuildMI(*BB, IP, PPC32::ADD, 2, TmpReg3).addReg(Op0Reg).addReg(TmpReg2); + + unsigned TmpReg4 = isNeg ? makeAnotherReg(Op0->getType()) : ResultReg; + BuildMI(*BB, IP, PPC32::SRAWI, 2, TmpReg4).addReg(Op0Reg).addImm(Log); + + if (isNeg) + BuildMI(*BB, IP, PPC32::NEG, 1, ResultReg).addReg(TmpReg4); + return; + } + } + + unsigned Op0Reg = getReg(Op0, BB, IP); + unsigned Op1Reg = getReg(Op1, BB, IP); + + if (isDiv) { + if (Ty->isSigned()) { + BuildMI(*BB, IP, PPC32::DIVW, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg); + } else { + BuildMI(*BB, IP, PPC32::DIVWU, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg); + } + } else { // Remainder + unsigned TmpReg1 = makeAnotherReg(Op0->getType()); + unsigned TmpReg2 = makeAnotherReg(Op0->getType()); + + if (Ty->isSigned()) { + BuildMI(*BB, IP, PPC32::DIVW, 2, TmpReg1).addReg(Op0Reg).addReg(Op1Reg); + } else { + BuildMI(*BB, IP, PPC32::DIVWU, 2, TmpReg1).addReg(Op0Reg).addReg(Op1Reg); + } + BuildMI(*BB, IP, PPC32::MULLW, 2, TmpReg2).addReg(TmpReg1).addReg(Op1Reg); + BuildMI(*BB, IP, PPC32::SUBF, 2, ResultReg).addReg(TmpReg2).addReg(Op0Reg); + } +} + + +/// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here +/// for constant immediate shift values, and for constant immediate +/// shift values equal to 1. Even the general case is sort of special, +/// because the shift amount has to be in CL, not just any old register. +/// +void ISel::visitShiftInst(ShiftInst &I) { + MachineBasicBlock::iterator IP = BB->end (); + emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1), + I.getOpcode () == Instruction::Shl, I.getType (), + getReg (I)); +} + +/// emitShiftOperation - Common code shared between visitShiftInst and +/// constant expression support. +void ISel::emitShiftOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Op, Value *ShiftAmount, bool isLeftShift, + const Type *ResultTy, unsigned DestReg) { + unsigned SrcReg = getReg (Op, MBB, IP); + bool isSigned = ResultTy->isSigned (); + unsigned Class = getClass (ResultTy); + + // Longs, as usual, are handled specially... + if (Class == cLong) { + // If we have a constant shift, we can generate much more efficient code + // than otherwise... 
+ // + if (ConstantUInt *CUI = dyn_cast(ShiftAmount)) { + unsigned Amount = CUI->getValue(); + if (Amount < 32) { + if (isLeftShift) { + // FIXME: RLWIMI is a use-and-def of DestReg+1, but that violates SSA + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg+1).addReg(SrcReg+1).addImm(Amount).addImm(0).addImm(31-Amount); + BuildMI(*MBB, IP, PPC32::RLWIMI, 5).addReg(DestReg+1).addReg(SrcReg).addImm(Amount).addImm(32-Amount).addImm(31); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addImm(Amount).addImm(0).addImm(31-Amount); + } else { + // FIXME: RLWIMI is a use-and-def of DestReg, but that violates SSA + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addImm(32-Amount).addImm(Amount).addImm(31); + BuildMI(*MBB, IP, PPC32::RLWIMI, 5).addReg(DestReg).addReg(SrcReg+1).addImm(32-Amount).addImm(0).addImm(Amount-1); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg+1).addReg(SrcReg+1).addImm(32-Amount).addImm(Amount).addImm(31); + } + } else { // Shifting more than 32 bits + Amount -= 32; + if (isLeftShift) { + if (Amount != 0) { + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg+1).addReg(SrcReg).addImm(Amount).addImm(0).addImm(31-Amount); + } else { + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(SrcReg).addReg(SrcReg); + } + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg).addReg(PPC32::R0).addImm(0); + } else { + if (Amount != 0) { + if (isSigned) + BuildMI(*MBB, IP, PPC32::SRAWI, 2, DestReg).addReg(SrcReg+1).addImm(Amount); + else + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg+1).addImm(32-Amount).addImm(Amount).addImm(31); + } else { + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg+1).addReg(SrcReg+1); + } + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg+1).addReg(PPC32::R0).addImm(0); + } + } + } else { + unsigned TmpReg1 = makeAnotherReg(Type::IntTy); + unsigned TmpReg2 = makeAnotherReg(Type::IntTy); + unsigned TmpReg3 = makeAnotherReg(Type::IntTy); + unsigned TmpReg4 = makeAnotherReg(Type::IntTy); + unsigned TmpReg5 = makeAnotherReg(Type::IntTy); + unsigned TmpReg6 = makeAnotherReg(Type::IntTy); + unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP); + + if (isLeftShift) { + BuildMI(*MBB, IP, PPC32::SUBFIC, 2, TmpReg1).addReg(ShiftAmountReg).addImm(32); + BuildMI(*MBB, IP, PPC32::SLW, 2, TmpReg2).addReg(SrcReg+1).addReg(ShiftAmountReg); + BuildMI(*MBB, IP, PPC32::SRW, 2, TmpReg3).addReg(SrcReg).addReg(TmpReg1); + BuildMI(*MBB, IP, PPC32::OR, 2, TmpReg4).addReg(TmpReg2).addReg(TmpReg3); + BuildMI(*MBB, IP, PPC32::ADDI, 2, TmpReg5).addReg(ShiftAmountReg).addImm(-32); + BuildMI(*MBB, IP, PPC32::SLW, 2, TmpReg6).addReg(SrcReg).addReg(TmpReg5); + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(TmpReg4).addReg(TmpReg6); + BuildMI(*MBB, IP, PPC32::SLW, 2, DestReg).addReg(SrcReg).addReg(ShiftAmountReg); + } else { + if (isSigned) { + // FIXME: Unimplmented + // Page C-3 of the PowerPC 32bit Programming Environments Manual + } else { + BuildMI(*MBB, IP, PPC32::SUBFIC, 2, TmpReg1).addReg(ShiftAmountReg).addImm(32); + BuildMI(*MBB, IP, PPC32::SRW, 2, TmpReg2).addReg(SrcReg).addReg(ShiftAmountReg); + BuildMI(*MBB, IP, PPC32::SLW, 2, TmpReg3).addReg(SrcReg+1).addReg(TmpReg1); + BuildMI(*MBB, IP, PPC32::OR, 2, TmpReg4).addReg(TmpReg2).addReg(TmpReg3); + BuildMI(*MBB, IP, PPC32::ADDI, 2, TmpReg5).addReg(ShiftAmountReg).addImm(-32); + BuildMI(*MBB, IP, PPC32::SRW, 2, TmpReg6).addReg(SrcReg+1).addReg(TmpReg5); + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(TmpReg4).addReg(TmpReg6); + BuildMI(*MBB, IP, PPC32::SRW, 2, DestReg+1).addReg(SrcReg+1).addReg(ShiftAmountReg); 
+ } + } + } + return; + } + + if (ConstantUInt *CUI = dyn_cast(ShiftAmount)) { + // The shift amount is constant, guaranteed to be a ubyte. Get its value. + assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?"); + unsigned Amount = CUI->getValue(); + + if (isLeftShift) { + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addImm(Amount).addImm(0).addImm(31-Amount); + } else { + if (isSigned) { + BuildMI(*MBB, IP, PPC32::SRAWI, 2, DestReg).addReg(SrcReg).addImm(Amount); + } else { + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addImm(32-Amount).addImm(Amount).addImm(31); + } + } + } else { // The shift amount is non-constant. + unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP); + + if (isLeftShift) { + BuildMI(*MBB, IP, PPC32::SLW, 2, DestReg).addReg(SrcReg).addReg(ShiftAmountReg); + } else { + BuildMI(*MBB, IP, isSigned ? PPC32::SRAW : PPC32::SRW, 2, DestReg).addReg(SrcReg).addReg(ShiftAmountReg); + } + } +} + + +/// visitLoadInst - Implement LLVM load instructions +/// +void ISel::visitLoadInst(LoadInst &I) { + static const unsigned Opcodes[] = { PPC32::LBZ, PPC32::LHZ, PPC32::LWZ, PPC32::LFS }; + unsigned Class = getClassB(I.getType()); + unsigned Opcode = Opcodes[Class]; + if (I.getType() == Type::DoubleTy) Opcode = PPC32::LFD; + + unsigned DestReg = getReg(I); + + if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) { + unsigned FI = getFixedSizedAllocaFI(AI); + if (Class == cLong) { + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, DestReg), FI); + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, DestReg+1), FI, 4); + } else { + addFrameReference(BuildMI(BB, Opcode, 2, DestReg), FI); + } + } else { + unsigned SrcAddrReg = getReg(I.getOperand(0)); + + if (Class == cLong) { + BuildMI(BB, PPC32::LWZ, 2, DestReg).addImm(0).addReg(SrcAddrReg); + BuildMI(BB, PPC32::LWZ, 2, DestReg+1).addImm(4).addReg(SrcAddrReg); + } else { + BuildMI(BB, Opcode, 2, DestReg).addImm(0).addReg(SrcAddrReg); + } + } +} + +/// visitStoreInst - Implement LLVM store instructions +/// +void ISel::visitStoreInst(StoreInst &I) { + unsigned ValReg = getReg(I.getOperand(0)); + unsigned AddressReg = getReg(I.getOperand(1)); + + const Type *ValTy = I.getOperand(0)->getType(); + unsigned Class = getClassB(ValTy); + + if (Class == cLong) { + BuildMI(BB, PPC32::STW, 3).addReg(ValReg).addImm(0).addReg(AddressReg); + BuildMI(BB, PPC32::STW, 3).addReg(ValReg+1).addImm(4).addReg(AddressReg); + return; + } + + static const unsigned Opcodes[] = { + PPC32::STB, PPC32::STH, PPC32::STW, PPC32::STFS + }; + unsigned Opcode = Opcodes[Class]; + if (ValTy == Type::DoubleTy) Opcode = PPC32::STFD; + BuildMI(BB, Opcode, 3).addReg(ValReg).addImm(0).addReg(AddressReg); +} + + +/// visitCastInst - Here we have various kinds of copying with or without sign +/// extension going on. +/// +void ISel::visitCastInst(CastInst &CI) { + Value *Op = CI.getOperand(0); + + unsigned SrcClass = getClassB(Op->getType()); + unsigned DestClass = getClassB(CI.getType()); + // Noop casts are not emitted: getReg will return the source operand as the + // register to use for any uses of the noop cast. + if (DestClass == SrcClass) + return; + + // If this is a cast from a 32-bit integer to a Long type, and the only uses + // of the case are GEP instructions, then the cast does not need to be + // generated explicitly, it will be folded into the GEP. 
+ if (DestClass == cLong && SrcClass == cInt) { + bool AllUsesAreGEPs = true; + for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I) + if (!isa(*I)) { + AllUsesAreGEPs = false; + break; + } + + // No need to codegen this cast if all users are getelementptr instrs... + if (AllUsesAreGEPs) return; + } + + unsigned DestReg = getReg(CI); + MachineBasicBlock::iterator MI = BB->end(); + emitCastOperation(BB, MI, Op, CI.getType(), DestReg); +} + +/// emitCastOperation - Common code shared between visitCastInst and constant +/// expression cast support. +/// +void ISel::emitCastOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Src, const Type *DestTy, + unsigned DestReg) { + const Type *SrcTy = Src->getType(); + unsigned SrcClass = getClassB(SrcTy); + unsigned DestClass = getClassB(DestTy); + unsigned SrcReg = getReg(Src, BB, IP); + + // Implement casts to bool by using compare on the operand followed by set if + // not zero on the result. + if (DestTy == Type::BoolTy) { + switch (SrcClass) { + case cByte: + case cShort: + case cInt: { + unsigned TmpReg = makeAnotherReg(Type::IntTy); + BuildMI(*BB, IP, PPC32::ADDIC, 2, TmpReg).addReg(SrcReg).addImm(-1); + BuildMI(*BB, IP, PPC32::SUBFE, 2, DestReg).addReg(TmpReg).addReg(SrcReg); + break; + } + case cLong: { + unsigned TmpReg = makeAnotherReg(Type::IntTy); + unsigned SrcReg2 = makeAnotherReg(Type::IntTy); + BuildMI(*BB, IP, PPC32::OR, 2, SrcReg2).addReg(SrcReg).addReg(SrcReg+1); + BuildMI(*BB, IP, PPC32::ADDIC, 2, TmpReg).addReg(SrcReg2).addImm(-1); + BuildMI(*BB, IP, PPC32::SUBFE, 2, DestReg).addReg(TmpReg).addReg(SrcReg2); + break; + } + case cFP: + // FIXME + // Load -0.0 + // Compare + // move to CR1 + // Negate -0.0 + // Compare + // CROR + // MFCR + // Left-align + // SRA ? + break; + } + return; + } + + // Implement casts between values of the same type class (as determined by + // getClass) by using a register-to-register move. + if (SrcClass == DestClass) { + if (SrcClass <= cInt) { + BuildMI(*BB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg); + } else if (SrcClass == cFP && SrcTy == DestTy) { + BuildMI(*BB, IP, PPC32::FMR, 1, DestReg).addReg(SrcReg); + } else if (SrcClass == cFP) { + if (SrcTy == Type::FloatTy) { // float -> double + assert(DestTy == Type::DoubleTy && "Unknown cFP member!"); + BuildMI(*BB, IP, PPC32::FMR, 1, DestReg).addReg(SrcReg); + } else { // double -> float + assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy && + "Unknown cFP member!"); + BuildMI(*BB, IP, PPC32::FRSP, 1, DestReg).addReg(SrcReg); + } + } else if (SrcClass == cLong) { + BuildMI(*BB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg); + BuildMI(*BB, IP, PPC32::OR, 2, DestReg+1).addReg(SrcReg+1).addReg(SrcReg+1); + } else { + assert(0 && "Cannot handle this type of cast instruction!"); + abort(); + } + return; + } + + // Handle cast of SMALLER int to LARGER int using a move with sign extension + // or zero extension, depending on whether the source type was signed. + if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) && + SrcClass < DestClass) { + bool isLong = DestClass == cLong; + if (isLong) DestClass = cInt; + + bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy; + if (SrcClass < cInt) { + if (isUnsigned) { + unsigned shift = (SrcClass == cByte) ? 24 : 16; + BuildMI(*BB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addZImm(0).addImm(shift).addImm(31); + } else { + BuildMI(*BB, IP, (SrcClass == cByte) ? 
PPC32::EXTSB : PPC32::EXTSH, 1, DestReg).addReg(SrcReg); + } + } else { + BuildMI(*BB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg); + } + + if (isLong) { // Handle upper 32 bits as appropriate... + if (isUnsigned) // Zero out top bits... + BuildMI(*BB, IP, PPC32::ADDI, 2, DestReg+1).addReg(PPC32::R0).addImm(0); + else // Sign extend bottom half... + BuildMI(*BB, IP, PPC32::SRAWI, 2, DestReg+1).addReg(DestReg).addImm(31); + } + return; + } + + // Special case long -> int ... + if (SrcClass == cLong && DestClass == cInt) { + BuildMI(*BB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg); + return; + } + + // Handle cast of LARGER int to SMALLER int with a clear or sign extend + if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt + && SrcClass > DestClass) { + bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy; + if (isUnsigned) { + unsigned shift = (SrcClass == cByte) ? 24 : 16; + BuildMI(*BB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addZImm(0).addImm(shift).addImm(31); + } else { + BuildMI(*BB, IP, (SrcClass == cByte) ? PPC32::EXTSB : PPC32::EXTSH, 1, DestReg).addReg(SrcReg); + } + return; + } + + // Handle casts from integer to floating point now... + if (DestClass == cFP) { + + // Emit a library call for long to float conversion + if (SrcClass == cLong) { + std::vector Args; + Args.push_back(ValueRecord(SrcReg, SrcTy)); + MachineInstr *TheCall = BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("__floatdidf", true); + doCall(ValueRecord(DestReg, DestTy), TheCall, Args); + return; + } + + unsigned TmpReg = makeAnotherReg(Type::IntTy); + switch (SrcTy->getPrimitiveID()) { + case Type::BoolTyID: + case Type::SByteTyID: + BuildMI(*BB, IP, PPC32::EXTSB, 1, TmpReg).addReg(SrcReg); + break; + case Type::UByteTyID: + BuildMI(*BB, IP, PPC32::RLWINM, 4, TmpReg).addReg(SrcReg).addZImm(0).addImm(24).addImm(31); + break; + case Type::ShortTyID: + BuildMI(*BB, IP, PPC32::EXTSB, 1, TmpReg).addReg(SrcReg); + break; + case Type::UShortTyID: + BuildMI(*BB, IP, PPC32::RLWINM, 4, TmpReg).addReg(SrcReg).addZImm(0).addImm(16).addImm(31); + break; + case Type::IntTyID: + BuildMI(*BB, IP, PPC32::OR, 2, TmpReg).addReg(SrcReg).addReg(SrcReg); + break; + case Type::UIntTyID: + BuildMI(*BB, IP, PPC32::OR, 2, TmpReg).addReg(SrcReg).addReg(SrcReg); + break; + default: // No promotion needed... + break; + } + + SrcReg = TmpReg; + + // Spill the integer to memory and reload it from there. 
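+ // The reload is done as a double using the usual 0x4330 bias trick: the
+ // integer becomes the low word of a double whose high word is 0x43300000
+ // (the exponent of 2^52), and subtracting the constant 2^52 (2^52 + 2^31
+ // in the signed case, where the value is first XORed with 0x80000000)
+ // leaves exactly the converted value.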
+ // Also spill room for a special conversion constant + int ConstantFrameIndex = + F->getFrameInfo()->CreateStackObject(Type::DoubleTy, TM.getTargetData()); + int ValueFrameIdx = + F->getFrameInfo()->CreateStackObject(Type::DoubleTy, TM.getTargetData()); + + unsigned constantHi = makeAnotherReg(Type::IntTy); + unsigned constantLo = makeAnotherReg(Type::IntTy); + unsigned ConstF = makeAnotherReg(Type::DoubleTy); + unsigned TempF = makeAnotherReg(Type::DoubleTy); + + if (!SrcTy->isSigned()) { + BuildMI(*BB, IP, PPC32::ADDIS, 2, constantHi).addReg(PPC32::R0).addImm(0x4330); + BuildMI(*BB, IP, PPC32::ADDI, 2, constantLo).addReg(PPC32::R0).addImm(0); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantHi), ConstantFrameIndex); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantLo), ConstantFrameIndex, 4); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantHi), ValueFrameIdx); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(SrcReg), ValueFrameIdx, 4); + addFrameReference(BuildMI(*BB, IP, PPC32::LFD, 2, ConstF), ConstantFrameIndex); + addFrameReference(BuildMI(*BB, IP, PPC32::LFD, 2, TempF), ValueFrameIdx); + BuildMI(*BB, IP, PPC32::FSUB, 2, DestReg).addReg(TempF).addReg(ConstF); + } else { + unsigned TempLo = makeAnotherReg(Type::IntTy); + BuildMI(*BB, IP, PPC32::ADDIS, 2, constantHi).addReg(PPC32::R0).addImm(0x4330); + BuildMI(*BB, IP, PPC32::ADDIS, 2, constantLo).addReg(PPC32::R0).addImm(0x8000); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantHi), ConstantFrameIndex); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantLo), ConstantFrameIndex, 4); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantHi), ValueFrameIdx); + BuildMI(*BB, IP, PPC32::XORIS, 2, TempLo).addReg(SrcReg).addImm(0x8000); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(TempLo), ValueFrameIdx, 4); + addFrameReference(BuildMI(*BB, IP, PPC32::LFD, 2, ConstF), ConstantFrameIndex); + addFrameReference(BuildMI(*BB, IP, PPC32::LFD, 2, TempF), ValueFrameIdx); + BuildMI(*BB, IP, PPC32::FSUB, 2, DestReg).addReg(TempF).addReg(ConstF); + } + return; + } + + // Handle casts from floating point to integer now... + if (SrcClass == cFP) { + + // emit library call + if (DestClass == cLong) { + std::vector Args; + Args.push_back(ValueRecord(SrcReg, SrcTy)); + MachineInstr *TheCall = BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("__fixdfdi", true); + doCall(ValueRecord(DestReg, DestTy), TheCall, Args); + return; + } + + int ValueFrameIdx = + F->getFrameInfo()->CreateStackObject(Type::DoubleTy, TM.getTargetData()); + + // load into 32 bit value, and then truncate as necessary + // FIXME: This is wrong for unsigned dest types + //if (DestTy->isSigned()) { + unsigned TempReg = makeAnotherReg(Type::DoubleTy); + BuildMI(*BB, IP, PPC32::FCTIWZ, 1, TempReg).addReg(SrcReg); + addFrameReference(BuildMI(*BB, IP, PPC32::STFD, 3).addReg(TempReg), ValueFrameIdx); + addFrameReference(BuildMI(*BB, IP, PPC32::LWZ, 2, DestReg), ValueFrameIdx+4); + //} else { + //} + + // FIXME: Truncate return value + return; + } + + // Anything we haven't handled already, we can't (yet) handle at all. + assert(0 && "Unhandled cast instruction!"); + abort(); +} + +/// visitVANextInst - Implement the va_next instruction... 
+/// +void ISel::visitVANextInst(VANextInst &I) { + unsigned VAList = getReg(I.getOperand(0)); + unsigned DestReg = getReg(I); + + unsigned Size; + switch (I.getArgType()->getPrimitiveID()) { + default: + std::cerr << I; + assert(0 && "Error: bad type for va_next instruction!"); + return; + case Type::PointerTyID: + case Type::UIntTyID: + case Type::IntTyID: + Size = 4; + break; + case Type::ULongTyID: + case Type::LongTyID: + case Type::DoubleTyID: + Size = 8; + break; + } + + // Increment the VAList pointer... + BuildMI(BB, PPC32::ADDI, 2, DestReg).addReg(VAList).addImm(Size); +} + +void ISel::visitVAArgInst(VAArgInst &I) { + unsigned VAList = getReg(I.getOperand(0)); + unsigned DestReg = getReg(I); + + switch (I.getType()->getPrimitiveID()) { + default: + std::cerr << I; + assert(0 && "Error: bad type for va_next instruction!"); + return; + case Type::PointerTyID: + case Type::UIntTyID: + case Type::IntTyID: + BuildMI(BB, PPC32::LWZ, 2, DestReg).addImm(0).addReg(VAList); + break; + case Type::ULongTyID: + case Type::LongTyID: + BuildMI(BB, PPC32::LWZ, 2, DestReg).addImm(0).addReg(VAList); + BuildMI(BB, PPC32::LWZ, 2, DestReg+1).addImm(4).addReg(VAList); + break; + case Type::DoubleTyID: + BuildMI(BB, PPC32::LFD, 2, DestReg).addImm(0).addReg(VAList); + break; + } +} + +/// visitGetElementPtrInst - instruction-select GEP instructions +/// +void ISel::visitGetElementPtrInst(GetElementPtrInst &I) { + unsigned outputReg = getReg(I); + emitGEPOperation(BB, BB->end(), I.getOperand(0),I.op_begin()+1, I.op_end(), outputReg); +} + +void ISel::emitGEPOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Src, User::op_iterator IdxBegin, + User::op_iterator IdxEnd, unsigned TargetReg) { + const TargetData &TD = TM.getTargetData(); + if (ConstantPointerRef *CPR = dyn_cast(Src)) + Src = CPR->getValue(); + + std::vector GEPOps; + GEPOps.resize(IdxEnd-IdxBegin+1); + GEPOps[0] = Src; + std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1); + + std::vector GEPTypes; + GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd), + gep_type_end(Src->getType(), IdxBegin, IdxEnd)); + + // Keep emitting instructions until we consume the entire GEP instruction. + while (!GEPOps.empty()) { + // It's an array or pointer access: [ArraySize x ElementType]. + const SequentialType *SqTy = cast(GEPTypes.back()); + Value *idx = GEPOps.back(); + GEPOps.pop_back(); // Consume a GEP operand + GEPTypes.pop_back(); + + // Many GEP instructions use a [cast (int/uint) to LongTy] as their + // operand on X86. Handle this case directly now... + if (CastInst *CI = dyn_cast(idx)) + if (CI->getOperand(0)->getType() == Type::IntTy || + CI->getOperand(0)->getType() == Type::UIntTy) + idx = CI->getOperand(0); + + // We want to add BaseReg to(idxReg * sizeof ElementType). First, we + // must find the size of the pointed-to type (Not coincidentally, the next + // type is the type of the elements in the array). + const Type *ElTy = SqTy->getElementType(); + unsigned elementSize = TD.getTypeSize(ElTy); + + if (elementSize == 1) { + // If the element size is 1, we don't have to multiply, just add + unsigned idxReg = getReg(idx, MBB, IP); + unsigned Reg = makeAnotherReg(Type::UIntTy); + BuildMI(*MBB, IP, PPC32::ADD, 2,TargetReg).addReg(Reg).addReg(idxReg); + --IP; // Insert the next instruction before this one. 
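+ // Note the inverted structure of this loop: GEP components are consumed
+ // from the back, so each step folds its contribution into TargetReg, then
+ // retargets the not-yet-emitted outer components at the fresh register
+ // Reg and backs the insertion point up so their code ends up *before* the
+ // add that was just generated.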
+ TargetReg = Reg; // Codegen the rest of the GEP into this + } else { + unsigned idxReg = getReg(idx, MBB, IP); + unsigned OffsetReg = makeAnotherReg(Type::UIntTy); + + // Make sure we can back the iterator up to point to the first + // instruction emitted. + MachineBasicBlock::iterator BeforeIt = IP; + if (IP == MBB->begin()) + BeforeIt = MBB->end(); + else + --BeforeIt; + doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize); + + // Emit an ADD to add OffsetReg to the basePtr. + unsigned Reg = makeAnotherReg(Type::UIntTy); + BuildMI(*MBB, IP, PPC32::ADD, 2, TargetReg).addReg(Reg).addReg(OffsetReg); + + // Step to the first instruction of the multiply. + if (BeforeIt == MBB->end()) + IP = MBB->begin(); + else + IP = ++BeforeIt; + + TargetReg = Reg; // Codegen the rest of the GEP into this + } + } +} + +/// visitAllocaInst - If this is a fixed size alloca, allocate space from the +/// frame manager, otherwise do it the hard way. +/// +void ISel::visitAllocaInst(AllocaInst &I) { + // If this is a fixed size alloca in the entry block for the function, we + // statically stack allocate the space, so we don't need to do anything here. + // + if (dyn_castFixedAlloca(&I)) return; + + // Find the data size of the alloca inst's getAllocatedType. + const Type *Ty = I.getAllocatedType(); + unsigned TySize = TM.getTargetData().getTypeSize(Ty); + + // Create a register to hold the temporary result of multiplying the type size + // constant by the variable amount. + unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy); + unsigned SrcReg1 = getReg(I.getArraySize()); + + // TotalSizeReg = mul , + MachineBasicBlock::iterator MBBI = BB->end(); + doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize); + + // AddedSize = add , 15 + unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, PPC32::ADD, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15); + + // AlignedSize = and , ~15 + unsigned AlignedSize = makeAnotherReg(Type::UIntTy); + BuildMI(BB, PPC32::RLWNM, 4, AlignedSize).addReg(AddedSizeReg).addImm(0).addImm(0).addImm(27); + + // Subtract size from stack pointer, thereby allocating some space. + BuildMI(BB, PPC32::SUB, 2, PPC32::R1).addReg(PPC32::R1).addReg(AlignedSize); + + // Put a pointer to the space into the result register, by copying + // the stack pointer. + BuildMI(BB, PPC32::OR, 2, getReg(I)).addReg(PPC32::R1).addReg(PPC32::R1); + + // Inform the Frame Information that we have just allocated a variable-sized + // object. + F->getFrameInfo()->CreateVariableSizedObject(); +} + +/// visitMallocInst - Malloc instructions are code generated into direct calls +/// to the library malloc. +/// +void ISel::visitMallocInst(MallocInst &I) { + unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType()); + unsigned Arg; + + if (ConstantUInt *C = dyn_cast(I.getOperand(0))) { + Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize)); + } else { + Arg = makeAnotherReg(Type::UIntTy); + unsigned Op0Reg = getReg(I.getOperand(0)); + MachineBasicBlock::iterator MBBI = BB->end(); + doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize); + } + + std::vector Args; + Args.push_back(ValueRecord(Arg, Type::UIntTy)); + MachineInstr *TheCall = BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("malloc", true); + doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args); +} + + +/// visitFreeInst - Free instructions are code gen'd to call the free libc +/// function. 
+/// +void ISel::visitFreeInst(FreeInst &I) { + std::vector Args; + Args.push_back(ValueRecord(I.getOperand(0))); + MachineInstr *TheCall = BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("free", true); + doCall(ValueRecord(0, Type::VoidTy), TheCall, Args); +} + +/// createPPC32SimpleInstructionSelector - This pass converts an LLVM function +/// into a machine code representation is a very simple peep-hole fashion. The +/// generated code sucks but the implementation is nice and simple. +/// +FunctionPass *llvm::createPPCSimpleInstructionSelector(TargetMachine &TM) { + return new ISel(TM); +} diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp new file mode 100644 index 00000000000..697be0907c2 --- /dev/null +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -0,0 +1,694 @@ +//===-- PPC32/Printer.cpp - Convert X86 LLVM code to Intel assembly ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains a printer that converts from our internal +// representation of machine-dependent LLVM code to Intel-format +// assembly language. This printer is the output mechanism used +// by `llc' and `lli -print-machineinstrs' on X86. +// +//===----------------------------------------------------------------------===// + +#include + +#include "PowerPC.h" +#include "PowerPCInstrInfo.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/Assembly/Writer.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Support/Mangler.h" +#include "Support/Statistic.h" +#include "Support/StringExtras.h" +#include "Support/CommandLine.h" + +namespace llvm { + +namespace { + Statistic<> EmittedInsts("asm-printer", "Number of machine instrs printed"); + + struct Printer : public MachineFunctionPass { + /// Output stream on which we're printing assembly code. + /// + std::ostream &O; + + /// Target machine description which we query for reg. names, data + /// layout, etc. + /// + TargetMachine &TM; + + /// Name-mangler for global names. + /// + Mangler *Mang; + std::set< std::string > Stubs; + std::set Strings; + + Printer(std::ostream &o, TargetMachine &tm) : O(o), TM(tm) { } + + /// We name each basic block in a Function with a unique number, so + /// that we can consistently refer to them later. This is cleared + /// at the beginning of each call to runOnMachineFunction(). + /// + typedef std::map ValueMapTy; + ValueMapTy NumberForBB; + + /// Cache of mangled name for current function. This is + /// recalculated at the beginning of each call to + /// runOnMachineFunction(). 
+ /// + std::string CurrentFnName; + + virtual const char *getPassName() const { + return "PowerPC Assembly Printer"; + } + + void printMachineInstruction(const MachineInstr *MI); + void printOp(const MachineOperand &MO, + bool elideOffsetKeyword = false); + void printConstantPool(MachineConstantPool *MCP); + bool runOnMachineFunction(MachineFunction &F); + bool doInitialization(Module &M); + bool doFinalization(Module &M); + void emitGlobalConstant(const Constant* CV); + void emitConstantValueOnly(const Constant *CV); + }; +} // end of anonymous namespace + +/// createPPCCodePrinterPass - Returns a pass that prints the X86 +/// assembly code for a MachineFunction to the given output stream, +/// using the given target machine description. This should work +/// regardless of whether the function is in SSA form. +/// +FunctionPass *createPPCCodePrinterPass(std::ostream &o,TargetMachine &tm){ + return new Printer(o, tm); +} + +/// isStringCompatible - Can we treat the specified array as a string? +/// Only if it is an array of ubytes or non-negative sbytes. +/// +static bool isStringCompatible(const ConstantArray *CVA) { + const Type *ETy = cast(CVA->getType())->getElementType(); + if (ETy == Type::UByteTy) return true; + if (ETy != Type::SByteTy) return false; + + for (unsigned i = 0; i < CVA->getNumOperands(); ++i) + if (cast(CVA->getOperand(i))->getValue() < 0) + return false; + + return true; +} + +/// toOctal - Convert the low order bits of X into an octal digit. +/// +static inline char toOctal(int X) { + return (X&7)+'0'; +} + +/// getAsCString - Return the specified array as a C compatible +/// string, only if the predicate isStringCompatible is true. +/// +static void printAsCString(std::ostream &O, const ConstantArray *CVA) { + assert(isStringCompatible(CVA) && "Array is not string compatible!"); + + O << "\""; + for (unsigned i = 0; i < CVA->getNumOperands(); ++i) { + unsigned char C = cast(CVA->getOperand(i))->getRawValue(); + + if (C == '"') { + O << "\\\""; + } else if (C == '\\') { + O << "\\\\"; + } else if (isprint(C)) { + O << C; + } else { + switch(C) { + case '\b': O << "\\b"; break; + case '\f': O << "\\f"; break; + case '\n': O << "\\n"; break; + case '\r': O << "\\r"; break; + case '\t': O << "\\t"; break; + default: + O << '\\'; + O << toOctal(C >> 6); + O << toOctal(C >> 3); + O << toOctal(C >> 0); + break; + } + } + } + O << "\""; +} + +// Print out the specified constant, without a storage class. Only the +// constants valid in constant expressions can occur here. +void Printer::emitConstantValueOnly(const Constant *CV) { + if (CV->isNullValue()) + O << "0"; + else if (const ConstantBool *CB = dyn_cast(CV)) { + assert(CB == ConstantBool::True); + O << "1"; + } else if (const ConstantSInt *CI = dyn_cast(CV)) + O << CI->getValue(); + else if (const ConstantUInt *CI = dyn_cast(CV)) + O << CI->getValue(); + else if (const ConstantPointerRef *CPR = dyn_cast(CV)) + // This is a constant address for a global variable or function. Use the + // name of the variable or function as the address value. 
+ O << Mang->getValueName(CPR->getValue()); + else if (const ConstantExpr *CE = dyn_cast(CV)) { + const TargetData &TD = TM.getTargetData(); + switch(CE->getOpcode()) { + case Instruction::GetElementPtr: { + // generate a symbolic expression for the byte address + const Constant *ptrVal = CE->getOperand(0); + std::vector idxVec(CE->op_begin()+1, CE->op_end()); + if (unsigned Offset = TD.getIndexedOffset(ptrVal->getType(), idxVec)) { + O << "("; + emitConstantValueOnly(ptrVal); + O << ") + " << Offset; + } else { + emitConstantValueOnly(ptrVal); + } + break; + } + case Instruction::Cast: { + // Support only non-converting or widening casts for now, that is, ones + // that do not involve a change in value. This assertion is really gross, + // and may not even be a complete check. + Constant *Op = CE->getOperand(0); + const Type *OpTy = Op->getType(), *Ty = CE->getType(); + + // Remember, kids, pointers on x86 can be losslessly converted back and + // forth into 32-bit or wider integers, regardless of signedness. :-P + assert(((isa(OpTy) + && (Ty == Type::LongTy || Ty == Type::ULongTy + || Ty == Type::IntTy || Ty == Type::UIntTy)) + || (isa(Ty) + && (OpTy == Type::LongTy || OpTy == Type::ULongTy + || OpTy == Type::IntTy || OpTy == Type::UIntTy)) + || (((TD.getTypeSize(Ty) >= TD.getTypeSize(OpTy)) + && OpTy->isLosslesslyConvertibleTo(Ty)))) + && "FIXME: Don't yet support this kind of constant cast expr"); + O << "("; + emitConstantValueOnly(Op); + O << ")"; + break; + } + case Instruction::Add: + O << "("; + emitConstantValueOnly(CE->getOperand(0)); + O << ") + ("; + emitConstantValueOnly(CE->getOperand(1)); + O << ")"; + break; + default: + assert(0 && "Unsupported operator!"); + } + } else { + assert(0 && "Unknown constant value!"); + } +} + +// Print a constant value or values, with the appropriate storage class as a +// prefix. +void Printer::emitGlobalConstant(const Constant *CV) { + const TargetData &TD = TM.getTargetData(); + + if (CV->isNullValue()) { + O << "\t.space\t " << TD.getTypeSize(CV->getType()) << "\n"; + return; + } else if (const ConstantArray *CVA = dyn_cast(CV)) { + if (isStringCompatible(CVA)) { + O << ".ascii"; + printAsCString(O, CVA); + O << "\n"; + } else { // Not a string. Print the values in successive locations + const std::vector &constValues = CVA->getValues(); + for (unsigned i=0; i < constValues.size(); i++) + emitGlobalConstant(cast(constValues[i].get())); + } + return; + } else if (const ConstantStruct *CVS = dyn_cast(CV)) { + // Print the fields in successive locations. Pad to align if needed! + const StructLayout *cvsLayout = TD.getStructLayout(CVS->getType()); + const std::vector& constValues = CVS->getValues(); + unsigned sizeSoFar = 0; + for (unsigned i=0, N = constValues.size(); i < N; i++) { + const Constant* field = cast(constValues[i].get()); + + // Check if padding is needed and insert one or more 0s. + unsigned fieldSize = TD.getTypeSize(field->getType()); + unsigned padSize = ((i == N-1? cvsLayout->StructSize + : cvsLayout->MemberOffsets[i+1]) + - cvsLayout->MemberOffsets[i]) - fieldSize; + sizeSoFar += fieldSize + padSize; + + // Now print the actual field value + emitGlobalConstant(field); + + // Insert the field padding unless it's zero bytes... 
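      // Worked example of the padding computation (a sketch, assuming a
      // 32-bit layout where int is 4-byte aligned): for { sbyte, int } the
      // first field has fieldSize 1, MemberOffsets[0] == 0 and
      // MemberOffsets[1] == 4, so padSize = (4 - 0) - 1 = 3 and a
      // "\t.space\t 3" is emitted between the two fields.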
+ if (padSize) + O << "\t.space\t " << padSize << "\n"; + } + assert(sizeSoFar == cvsLayout->StructSize && + "Layout of constant struct may be incorrect!"); + return; + } else if (const ConstantFP *CFP = dyn_cast(CV)) { + // FP Constants are printed as integer constants to avoid losing + // precision... + double Val = CFP->getValue(); + switch (CFP->getType()->getPrimitiveID()) { + default: assert(0 && "Unknown floating point type!"); + case Type::FloatTyID: { + union FU { // Abide by C TBAA rules + float FVal; + unsigned UVal; + } U; + U.FVal = Val; + O << ".long\t" << U.UVal << "\t# float " << Val << "\n"; + return; + } + case Type::DoubleTyID: { + union DU { // Abide by C TBAA rules + double FVal; + uint64_t UVal; + struct { + uint32_t MSWord; + uint32_t LSWord; + } T; + } U; + U.FVal = Val; + + O << ".long\t" << U.T.MSWord << "\t# double most significant word " << Val << "\n"; + O << ".long\t" << U.T.LSWord << "\t# double least significant word" << Val << "\n"; + return; + } + } + } else if (CV->getType()->getPrimitiveSize() == 64) { + const ConstantInt *CI = dyn_cast(CV); + if(CI) { + union DU { // Abide by C TBAA rules + int64_t UVal; + struct { + uint32_t MSWord; + uint32_t LSWord; + } T; + } U; + U.UVal = CI->getRawValue(); + + O << ".long\t" << U.T.MSWord << "\t# Double-word most significant word " << U.UVal << "\n"; + O << ".long\t" << U.T.LSWord << "\t# Double-word least significant word" << U.UVal << "\n"; + return; + } + } + + const Type *type = CV->getType(); + O << "\t"; + switch (type->getPrimitiveID()) { + case Type::UByteTyID: case Type::SByteTyID: + O << ".byte"; + break; + case Type::UShortTyID: case Type::ShortTyID: + O << ".short"; + break; + case Type::BoolTyID: + case Type::PointerTyID: + case Type::UIntTyID: case Type::IntTyID: + O << ".long"; + break; + case Type::ULongTyID: case Type::LongTyID: + assert (0 && "Should have already output double-word constant."); + case Type::FloatTyID: case Type::DoubleTyID: + assert (0 && "Should have already output floating point constant."); + default: + assert (0 && "Can't handle printing this type of thing"); + break; + } + O << "\t"; + emitConstantValueOnly(CV); + O << "\n"; +} + +/// printConstantPool - Print to the current output stream assembly +/// representations of the constants in the constant pool MCP. This is +/// used to print out constants which have been "spilled to memory" by +/// the code generator. +/// +void Printer::printConstantPool(MachineConstantPool *MCP) { + const std::vector &CP = MCP->getConstants(); + const TargetData &TD = TM.getTargetData(); + + if (CP.empty()) return; + + for (unsigned i = 0, e = CP.size(); i != e; ++i) { + O << "\t.const\n"; + O << "\t.align " << (unsigned)TD.getTypeAlignment(CP[i]->getType()) + << "\n"; + O << ".CPI" << CurrentFnName << "_" << i << ":\t\t\t\t\t#" + << *CP[i] << "\n"; + emitGlobalConstant(CP[i]); + } +} + +/// runOnMachineFunction - This uses the printMachineInstruction() +/// method to print assembly for each instruction. +/// +bool Printer::runOnMachineFunction(MachineFunction &MF) { + // BBNumber is used here so that a given Printer will never give two + // BBs the same name. (If you have a better way, please let me know!) + static unsigned BBNumber = 0; + + O << "\n\n"; + // What's my mangled name? + CurrentFnName = Mang->getValueName(MF.getFunction()); + + // Print out constants referenced by the function + printConstantPool(MF.getConstantPool()); + + // Print out labels for the function. 
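  // Roughly what the lines below print for a function the mangler names
  // "_foo" (a sketch; the leading underscore comes from the Mangler created
  // in doInitialization, and the block labels use the file-static,
  // monotonically increasing BBNumber):
  //
  //         .text
  //         .globl  _foo
  //         .align 5
  //   _foo:
  //   L0:   # entry
  //         <one line per machine instruction>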
+ O << "\t.text\n"; + O << "\t.globl\t" << CurrentFnName << "\n"; + O << "\t.align 5\n"; + O << CurrentFnName << ":\n"; + + // Number each basic block so that we can consistently refer to them + // in PC-relative references. + NumberForBB.clear(); + for (MachineFunction::const_iterator I = MF.begin(), E = MF.end(); + I != E; ++I) { + NumberForBB[I->getBasicBlock()] = BBNumber++; + } + + // Print out code for the function. + for (MachineFunction::const_iterator I = MF.begin(), E = MF.end(); + I != E; ++I) { + // Print a label for the basic block. + O << "L" << NumberForBB[I->getBasicBlock()] << ":\t# " + << I->getBasicBlock()->getName() << "\n"; + for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end(); + II != E; ++II) { + // Print the assembly for the instruction. + O << "\t"; + printMachineInstruction(II); + } + } + + // We didn't modify anything. + return false; +} + + + +void Printer::printOp(const MachineOperand &MO, + bool elideOffsetKeyword /* = false */) { + const MRegisterInfo &RI = *TM.getRegisterInfo(); + int new_symbol; + + switch (MO.getType()) { + case MachineOperand::MO_VirtualRegister: + if (Value *V = MO.getVRegValueOrNull()) { + O << "<" << V->getName() << ">"; + return; + } + // FALLTHROUGH + case MachineOperand::MO_MachineRegister: + O << RI.get(MO.getReg()).Name; + return; + + case MachineOperand::MO_SignExtendedImmed: + case MachineOperand::MO_UnextendedImmed: + O << (int)MO.getImmedValue(); + return; + case MachineOperand::MO_MachineBasicBlock: { + MachineBasicBlock *MBBOp = MO.getMachineBasicBlock(); + O << ".LBB" << Mang->getValueName(MBBOp->getParent()->getFunction()) + << "_" << MBBOp->getNumber () << "\t# " + << MBBOp->getBasicBlock ()->getName (); + return; + } + case MachineOperand::MO_PCRelativeDisp: + std::cerr << "Shouldn't use addPCDisp() when building PPC MachineInstrs"; + abort (); + return; + case MachineOperand::MO_GlobalAddress: + if (!elideOffsetKeyword) { + if(isa(MO.getGlobal())) { + Stubs.insert(Mang->getValueName(MO.getGlobal())); + O << "L" << Mang->getValueName(MO.getGlobal()) << "$stub"; + } else { + O << Mang->getValueName(MO.getGlobal()); + } + } + return; + case MachineOperand::MO_ExternalSymbol: + O << MO.getSymbolName(); + return; + default: + O << ""; return; + } +} + +#if 0 +static inline +unsigned int ValidOpcodes(const MachineInstr *MI, unsigned int ArgType[5]) { + int i; + unsigned int retval = 1; + + for(i = 0; i<5; i++) { + switch(ArgType[i]) { + case none: + break; + case Gpr: + case Gpr0: + Type::UIntTy + case Simm16: + case Zimm16: + case PCRelimm24: + case Imm24: + case Imm5: + case PCRelimm14: + case Imm14: + case Imm2: + case Crf: + case Imm3: + case Imm1: + case Fpr: + case Imm4: + case Imm8: + case Disimm16: + case Spr: + case Sgr: + }; + + } + } +} +#endif + +/// printMachineInstruction -- Print out a single PPC32 LLVM instruction +/// MI in Darwin syntax to the current output stream. 
+/// +void Printer::printMachineInstruction(const MachineInstr *MI) { + unsigned Opcode = MI->getOpcode(); + const TargetInstrInfo &TII = *TM.getInstrInfo(); + const TargetInstrDescriptor &Desc = TII.get(Opcode); + unsigned int i; + + unsigned int ArgCount = Desc.TSFlags & PPC32II::ArgCountMask; + unsigned int ArgType[5]; + + + ArgType[0] = (Desc.TSFlags>>PPC32II::Arg0TypeShift) & PPC32II::ArgTypeMask; + ArgType[1] = (Desc.TSFlags>>PPC32II::Arg1TypeShift) & PPC32II::ArgTypeMask; + ArgType[2] = (Desc.TSFlags>>PPC32II::Arg2TypeShift) & PPC32II::ArgTypeMask; + ArgType[3] = (Desc.TSFlags>>PPC32II::Arg3TypeShift) & PPC32II::ArgTypeMask; + ArgType[4] = (Desc.TSFlags>>PPC32II::Arg4TypeShift) & PPC32II::ArgTypeMask; + + assert ( ((Desc.TSFlags & PPC32II::VMX) == 0) && "Instruction requires VMX support"); + assert ( ((Desc.TSFlags & PPC32II::PPC64) == 0) && "Instruction requires 64 bit support"); + //assert ( ValidOpcodes(MI, ArgType) && "Instruction has invalid inputs"); + ++EmittedInsts; + + if(Opcode == PPC32::MovePCtoLR) { + O << "mflr r0\n"; + O << "bcl 20,31,L" << CurrentFnName << "$pb\n"; + O << "L" << CurrentFnName << "$pb:\n"; + return; + } + + O << TII.getName(MI->getOpcode()) << " "; + std::cout << TII.getName(MI->getOpcode()) << " expects " << ArgCount << " args\n"; + + if(Opcode == PPC32::LOADLoAddr) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << ", lo16("; + printOp(MI->getOperand(2)); + O << "-L" << CurrentFnName << "$pb)\n"; + return; + } + + if(Opcode == PPC32::LOADHiAddr) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << ", ha16(" ; + printOp(MI->getOperand(2)); + O << "-L" << CurrentFnName << "$pb)\n"; + return; + } + + if( (ArgCount == 3) && (ArgType[1] == PPC32II::Disimm16) ) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << "("; + if((ArgType[2] == PPC32II::Gpr0) && (MI->getOperand(2).getReg() == PPC32::R0)) { + O << "0"; + } else { + printOp(MI->getOperand(2)); + } + O << ")\n"; + } else { + for(i = 0; i< ArgCount; i++) { + if( (ArgType[i] == PPC32II::Gpr0) && ((MI->getOperand(i).getReg()) == PPC32::R0)) { + O << "0"; + } else { + //std::cout << "DEBUG " << (*(TM.getRegisterInfo())).get(MI->getOperand(i).getReg()).Name << "\n"; + printOp(MI->getOperand(i)); + } + if( ArgCount - 1 == i) { + O << "\n"; + } else { + O << ", "; + } + } + } + + return; +} + +bool Printer::doInitialization(Module &M) { + // Tell gas we are outputting Intel syntax (not AT&T syntax) assembly. + // + // Bug: gas in `intel_syntax noprefix' mode interprets the symbol `Sp' in an + // instruction as a reference to the register named sp, and if you try to + // reference a symbol `Sp' (e.g. `mov ECX, OFFSET Sp') then it gets lowercased + // before being looked up in the symbol table. This creates spurious + // `undefined symbol' errors when linking. Workaround: Do not use `noprefix' + // mode, and decorate all register names with percent signs. + // O << "\t.intel_syntax\n"; + Mang = new Mangler(M, true); + return false; // success +} + +// SwitchSection - Switch to the specified section of the executable if we are +// not already in it! 
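// As used in doFinalization below: SwitchSection(O, CurSection, ".data")
// prints "\t.data" only when the tracked section actually changes, and
// passing "" simply clears the tracked section without printing anything, so
// that a hand-emitted ".section" directive can follow.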
+// +static void SwitchSection(std::ostream &OS, std::string &CurSection, + const char *NewSection) { + if (CurSection != NewSection) { + CurSection = NewSection; + if (!CurSection.empty()) + OS << "\t" << NewSection << "\n"; + } +} + +bool Printer::doFinalization(Module &M) { + const TargetData &TD = TM.getTargetData(); + std::string CurSection; + + // Print out module-level global variables here. + for (Module::const_giterator I = M.gbegin(), E = M.gend(); I != E; ++I) + if (I->hasInitializer()) { // External global require no code + O << "\n\n"; + std::string name = Mang->getValueName(I); + Constant *C = I->getInitializer(); + unsigned Size = TD.getTypeSize(C->getType()); + unsigned Align = TD.getTypeAlignment(C->getType()); + + if (C->isNullValue() && + (I->hasLinkOnceLinkage() || I->hasInternalLinkage() || + I->hasWeakLinkage() /* FIXME: Verify correct */)) { + SwitchSection(O, CurSection, ".data"); + if (I->hasInternalLinkage()) + O << "\t.local " << name << "\n"; + + O << "\t.comm " << name << "," << TD.getTypeSize(C->getType()) + << "," << (unsigned)TD.getTypeAlignment(C->getType()); + O << "\t\t# "; + WriteAsOperand(O, I, true, true, &M); + O << "\n"; + } else { + switch (I->getLinkage()) { + case GlobalValue::LinkOnceLinkage: + case GlobalValue::WeakLinkage: // FIXME: Verify correct for weak. + // Nonnull linkonce -> weak + O << "\t.weak " << name << "\n"; + SwitchSection(O, CurSection, ""); + O << "\t.section\t.llvm.linkonce.d." << name << ",\"aw\",@progbits\n"; + break; + + case GlobalValue::AppendingLinkage: + // FIXME: appending linkage variables should go into a section of + // their name or something. For now, just emit them as external. + case GlobalValue::ExternalLinkage: + // If external or appending, declare as a global symbol + O << "\t.globl " << name << "\n"; + // FALL THROUGH + case GlobalValue::InternalLinkage: + if (C->isNullValue()) + SwitchSection(O, CurSection, ".bss"); + else + SwitchSection(O, CurSection, ".data"); + break; + } + + O << "\t.align " << Align << "\n"; + O << name << ":\t\t\t\t# "; + WriteAsOperand(O, I, true, true, &M); + O << " = "; + WriteAsOperand(O, C, false, false, &M); + O << "\n"; + emitGlobalConstant(C); + } + } + + for(std::set::iterator i = Stubs.begin(); i != Stubs.end(); ++i) { + O << ".data\n"; + O << ".section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32\n"; + O << "\t.align 2\n"; + O << "L" << *i << "$stub:\n"; + O << "\t.indirect_symbol " << *i << "\n"; + O << "\tmflr r0\n"; + O << "\tbcl 20,31,L0$" << *i << "\n"; + O << "L0$" << *i << ":\n"; + O << "\tmflr r11\n"; + O << "\taddis r11,r11,ha16(L" << *i << "$lazy_ptr-L0$" << *i << ")\n"; + O << "\tmtlr r0\n"; + O << "\tlwzu r12,lo16(L" << *i << "$lazy_ptr-L0$" << *i << ")(r11)\n"; + O << "\tmtctr r12\n"; + O << "\tbctr\n"; + O << ".data\n"; + O << ".lazy_symbol_pointer\n"; + O << "L" << *i << "$lazy_ptr:\n"; + O << ".indirect_symbol " << *i << "\n"; + O << ".long dyld_stub_binding_helper\n"; + + } + + delete Mang; + return false; // success +} + +} // End llvm namespace diff --git a/lib/Target/PowerPC/PPCCodeEmitter.cpp b/lib/Target/PowerPC/PPCCodeEmitter.cpp new file mode 100644 index 00000000000..3c423e5cef6 --- /dev/null +++ b/lib/Target/PowerPC/PPCCodeEmitter.cpp @@ -0,0 +1,43 @@ +//===-- PowerPCCodeEmitter.cpp - JIT Code Emitter for PowerPC -----*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. 
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "PowerPCTargetMachine.h"
+
+namespace llvm {
+
+/// addPassesToEmitMachineCode - Add passes to the specified pass manager to get
+/// machine code emitted. This uses a MachineCodeEmitter object to handle
+/// actually outputting the machine code and resolving things like the address
+/// of functions. This method should return true if machine code emission is
+/// not supported.
+///
+bool PowerPCTargetMachine::addPassesToEmitMachineCode(FunctionPassManager &PM,
+                                                      MachineCodeEmitter &MCE) {
+  return true;
+  // It should go something like this:
+  // PM.add(new Emitter(MCE));  // Machine code emitter pass for PowerPC
+  // Delete machine code for this function after emitting it:
+  // PM.add(createMachineCodeDeleter());
+}
+
+void *PowerPCJITInfo::getJITStubForFunction(Function *F,
+                                            MachineCodeEmitter &MCE) {
+  assert (0 && "PowerPCJITInfo::getJITStubForFunction not implemented");
+  return 0;
+}
+
+void PowerPCJITInfo::replaceMachineCodeForFunction (void *Old, void *New) {
+  assert (0 && "PowerPCJITInfo::replaceMachineCodeForFunction not implemented");
+}
+
+} // end llvm namespace
+
diff --git a/lib/Target/PowerPC/PPCInstrBuilder.h b/lib/Target/PowerPC/PPCInstrBuilder.h
new file mode 100644
index 00000000000..704e17c49b0
--- /dev/null
+++ b/lib/Target/PowerPC/PPCInstrBuilder.h
@@ -0,0 +1,53 @@
+//===-- PowerPCInstrBuilder.h - Functions to aid building PPC insts -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes functions that may be used with BuildMI from the
+// MachineInstrBuilder.h file to simplify generating frame and constant pool
+// references.
+//
+// For reference, the order of operands for memory references is:
+// (Operand), Dest Reg, Base Reg, and either Reg Index or Immediate Displacement.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PPCINSTRBUILDER_H
+#define PPCINSTRBUILDER_H
+
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+
+namespace llvm {
+
+/// addFrameReference - This function is used to add a reference to the base of
+/// an abstract object on the stack frame of the current function.  This
+/// reference carries the FrameIndex until it is resolved to a base register and
+/// offset.  This allows a constant offset to be specified as well...
+///
+inline const MachineInstrBuilder &
+addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0, bool mem = true) {
+  if (mem)
+    return MIB.addSImm(Offset).addFrameIndex(FI);
+  else
+    return MIB.addFrameIndex(FI).addSImm(Offset);
+}
+
+/// addConstantPoolReference - This function is used to add a reference to the
+/// base of a constant value spilled to the per-function constant pool.  The
+/// reference carries a ConstantPoolIndex, which is retained until either
+/// machine code emission or assembly output resolves it.  This allows an
+/// optional offset to be added as well.
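// Usage sketch for these helpers (illustrative only; DestReg and the frame
// index FI are placeholders, not names from this patch): loading a word from
// a stack slot could be written as
//
//     addFrameReference(BuildMI(BB, PPC32::LWZ, 2, DestReg), FI);
//
// which appends a signed immediate 0 and the abstract frame index to the LWZ,
// to be rewritten into a real base register and displacement once frame
// layout is known.  addConstantPoolReference below plays the same role for
// constant-pool entries, appending an offset and a ConstantPoolIndex operand.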
+/// +inline const MachineInstrBuilder & +addConstantPoolReference(const MachineInstrBuilder &MIB, unsigned CPI, + int Offset = 0) { + return MIB.addSImm(Offset).addConstantPoolIndex(CPI); +} + +} // End llvm namespace + +#endif diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td new file mode 100644 index 00000000000..cdb2d95d32b --- /dev/null +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -0,0 +1,2054 @@ +//===- PowerPCInstrInfo.td - Describe the PowerPC Instruction Set -*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +class Format val> { + bits<5> Value = val; +} + +class PPC32Inst : Instruction { + field bits<32> Inst; + bits<3> ArgCount; + bits<5> Arg0Type; + bits<5> Arg1Type; + bits<5> Arg2Type; + bits<5> Arg3Type; + bits<5> Arg4Type; + bit PPC64; + bit VMX; + + let Namespace = "PPC32"; +} + +def Pseudo: Format<0>; +def Gpr : Format<1>; +def Gpr0 : Format<2>; +def Simm16 : Format<3>; +def Zimm16 : Format<4>; +def PCRelimm24 : Format<5>; +def Imm24 : Format<6>; +def Imm5 : Format<7>; +def PCRelimm14 : Format<8>; +def Imm14 : Format<9>; +def Imm2 : Format<10>; +def Crf : Format<11>; +def Imm3 : Format<12>; +def Imm1 : Format<13>; +def Fpr : Format<14>; +def Imm4 : Format<15>; +def Imm8 : Format<16>; +def Disimm16 : Format<17>; +def Disimm14 : Format<18>; +def Spr : Format<19>; +def Sgr : Format<20>; +def Imm15 : Format<21>; +def Vpr : Format<22>; + +class PPC32InstPattern0 opconstant0, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<16> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; + let Inst {15-0} = operand2; +} + +class PPC32InstPattern1 opconstant0, bits<5> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<16> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {20-16} = opconstant1; + let Inst {25-21} = operand0; + let Inst {15-0} = operand1; +} + +class PPC32InstPattern2 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; + let Inst {15-11} = operand2; +} + +class PPC32InstPattern3 opconstant0, bits<16> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; 
+ + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {15-0} = opconstant1; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; +} + +class PPC32InstPattern4 opconstant0, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<16> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {20-16} = operand0; + let Inst {25-21} = operand1; + let Inst {15-0} = operand2; +} + +class PPC32InstPattern5 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {20-16} = operand0; + let Inst {25-21} = operand1; + let Inst {15-11} = operand2; +} + +class PPC32InstPattern6 opconstant0, bits<2> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 1; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<24> operand0; + + + let Inst {31-26} = opconstant0; + let Inst {1-0} = opconstant1; + let Inst {25-2} = operand0; +} + +class PPC32InstPattern7 opconstant0, bits<2> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<14> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {1-0} = opconstant1; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; + let Inst {15-2} = operand2; +} + +class PPC32InstPattern8 opconstant0, bits<3> opconstant1, bits<11> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<2> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {15-13} = opconstant1; + let Inst {10-0} = opconstant2; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; + let Inst {12-11} = operand2; +} + +class PPC32InstPattern9 opconstant0, bits<2> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<14> operand1; + + + let Inst {31-21} = opconstant0; + let Inst 
{1-0} = opconstant1; + let Inst {20-16} = operand0; + let Inst {15-2} = operand1; +} + +class PPC32InstPattern10 opconstant0, bits<2> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 1; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<14> operand0; + + + let Inst {31-16} = opconstant0; + let Inst {1-0} = opconstant1; + let Inst {15-2} = operand0; +} + +class PPC32InstPattern11 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 1; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<2> operand0; + + + let Inst {31-13} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {12-11} = operand0; +} + +class PPC32InstPattern12 opconstant0, bits<3> opconstant1, bits<11> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<2> operand1; + + + let Inst {31-21} = opconstant0; + let Inst {15-13} = opconstant1; + let Inst {10-0} = opconstant2; + let Inst {20-16} = operand0; + let Inst {12-11} = operand1; +} + +class PPC32InstPattern13 opconstant0, bits<1> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 4; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = OperandType3.Value; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<3> operand0; + bits<1> operand1; + bits<5> operand2; + bits<16> operand3; + + + let Inst {31-26} = opconstant0; + let Inst {22} = opconstant1; + let Inst {25-23} = operand0; + let Inst {21} = operand1; + let Inst {20-16} = operand2; + let Inst {15-0} = operand3; +} + +class PPC32InstPattern14 opconstant0, bits<2> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<3> operand0; + bits<5> operand1; + bits<16> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {22-21} = opconstant1; + let Inst {25-23} = operand0; + let Inst {20-16} = operand1; + let Inst {15-0} = operand2; +} + +class PPC32InstPattern15 opconstant0, bits<1> opconstant1, bits<11> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 4; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = OperandType3.Value; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<3> operand0; + bits<1> operand1; + bits<5> operand2; + bits<5> operand3; + + + let Inst {31-26} = opconstant0; + let Inst {22} = opconstant1; + let Inst {10-0} = opconstant2; + let Inst {25-23} = operand0; + let Inst {21} = operand1; + let Inst {20-16} = operand2; + let Inst {15-11} = operand3; +} + +class PPC32InstPattern16 opconstant0, bits<2> opconstant1, 
bits<11> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<3> operand0; + bits<5> operand1; + bits<5> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {22-21} = opconstant1; + let Inst {10-0} = opconstant2; + let Inst {25-23} = operand0; + let Inst {20-16} = operand1; + let Inst {15-11} = operand2; +} + +class PPC32InstPattern17 opconstant0, bits<16> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {15-0} = opconstant1; + let Inst {20-16} = operand0; + let Inst {25-21} = operand1; +} + +class PPC32InstPattern18 opconstant0, bits<5> opconstant1, bits<6> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {15-11} = opconstant1; + let Inst {5-0} = opconstant2; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; + let Inst {10-6} = operand2; +} + +class PPC32InstPattern19 opconstant0, bits<6> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 4; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = OperandType3.Value; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + bits<5> operand3; + + + let Inst {31-26} = opconstant0; + let Inst {5-0} = opconstant1; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; + let Inst {10-6} = operand2; + let Inst {15-11} = operand3; +} + +class PPC32InstPattern20 opconstant0, bits<5> opconstant1, bits<11> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {20-16} = opconstant1; + let Inst {10-0} = opconstant2; + let Inst {25-21} = operand0; + let Inst {15-11} = operand1; +} + +class PPC32InstPattern21 opconstant0, bits<21> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 1; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + + + let Inst {31-26} = opconstant0; + let Inst {20-0} = opconstant1; + let Inst {25-21} = operand0; +} + +class PPC32InstPattern22 opconstant0, bits<18> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = 
ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<3> operand0; + bits<5> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {17-0} = opconstant1; + let Inst {25-23} = operand0; + let Inst {22-18} = operand1; +} + +class PPC32InstPattern23 opconstant0, bits<7> opconstant1, bits<12> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<3> operand0; + bits<4> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {22-16} = opconstant1; + let Inst {11-0} = opconstant2; + let Inst {25-23} = operand0; + let Inst {15-12} = operand1; +} + +class PPC32InstPattern24 opconstant0, bits<1> opconstant1, bits<11> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<8> operand0; + bits<5> operand1; + + + let Inst {31-25} = opconstant0; + let Inst {16} = opconstant1; + let Inst {10-0} = opconstant2; + let Inst {24-17} = operand0; + let Inst {15-11} = operand1; +} + +class PPC32InstPattern25 opconstant0, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<16> operand1; + bits<5> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {25-21} = operand0; + let Inst {15-0} = operand1; + let Inst {20-16} = operand2; +} + +class PPC32InstPattern26 opconstant0, bits<2> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<14> operand1; + bits<5> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {1-0} = opconstant1; + let Inst {25-21} = operand0; + let Inst {15-2} = operand1; + let Inst {20-16} = operand2; +} + +class PPC32InstPattern27 opconstant0, bits<2> opconstant1, bits<18> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<3> operand0; + bits<3> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {22-21} = opconstant1; + let Inst {17-0} = opconstant2; + let Inst {25-23} = operand0; + let Inst {20-18} = operand1; +} + +class PPC32InstPattern28 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; 
+ bits<10> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {25-21} = operand0; + let Inst {20-11} = operand1; +} + +class PPC32InstPattern29 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<10> operand0; + bits<5> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {20-11} = operand0; + let Inst {25-21} = operand1; +} + +class PPC32InstPattern30 opconstant0, bits<1> opconstant1, bits<12> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<8> operand0; + bits<5> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {20} = opconstant1; + let Inst {11-0} = opconstant2; + let Inst {19-12} = operand0; + let Inst {25-21} = operand1; +} + +class PPC32InstPattern31 opconstant0, bits<23> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 1; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<3> operand0; + + + let Inst {31-26} = opconstant0; + let Inst {22-0} = opconstant1; + let Inst {25-23} = operand0; +} + +class PPC32InstPattern32 opconstant0, bits<1> opconstant1, bits<12> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<8> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {20} = opconstant1; + let Inst {11-0} = opconstant2; + let Inst {25-21} = operand0; + let Inst {19-12} = operand1; +} + +class PPC32InstPattern33 opconstant0, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 0; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = 0; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + + + let Inst {31-0} = opconstant0; +} + +class PPC32InstPattern34 opconstant0, bits<1> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 5; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = OperandType3.Value; + let Arg4Type = OperandType4.Value; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + bits<5> operand3; + bits<5> operand4; + + + let Inst {31-26} = opconstant0; + let Inst {0} = opconstant1; + let Inst {20-16} = operand0; + let Inst {25-21} = operand1; + let Inst {15-11} = operand2; + let Inst {10-6} = operand3; + let Inst {5-1} = operand4; +} + +class PPC32InstPattern35 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + 
let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + + + let Inst {31-26} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {25-21} = operand0; + let Inst {15-11} = operand1; + let Inst {20-16} = operand2; +} + +class PPC32InstPattern36 opconstant0, bits<21> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 1; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<2> operand0; + + + let Inst {31-23} = opconstant0; + let Inst {20-0} = opconstant1; + let Inst {22-21} = operand0; +} + +class PPC32InstPattern37 opconstant0, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<16> operand1; + + + let Inst {31-21} = opconstant0; + let Inst {20-16} = operand0; + let Inst {15-0} = operand1; +} + +class PPC32InstPattern38 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + + + let Inst {31-21} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {20-16} = operand0; + let Inst {15-11} = operand1; +} + +class PPC32InstPattern39 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<4> operand2; + + + let Inst {31-25} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {20-16} = operand0; + let Inst {15-11} = operand1; + let Inst {24-21} = operand2; +} + +class PPC32InstPattern40 opconstant0, bits<4> opconstant1, bits<16> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<1> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {20-17} = opconstant1; + let Inst {15-0} = opconstant2; + let Inst {25-21} = operand0; + let Inst {16} = operand1; +} + +class PPC32InstPattern41 opconstant0, bits<1> opconstant1, bits<16> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<4> operand0; + bits<5> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {20} = opconstant1; + let Inst {15-0} = opconstant2; + let Inst {19-16} = operand0; + let Inst {25-21} = operand1; +} + +class PPC32InstPattern42 opconstant0, bits<1> opconstant1, 
bits<16> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<4> operand1; + + + let Inst {31-26} = opconstant0; + let Inst {20} = opconstant1; + let Inst {15-0} = opconstant2; + let Inst {25-21} = operand0; + let Inst {19-16} = operand1; +} + +class PPC32InstPattern43 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 1; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + + + let Inst {31-16} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {15-11} = operand0; +} + +class PPC32InstPattern44 opconstant0, bits<5> opconstant1, bits<11> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 2; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<1> operand1; + + + let Inst {31-22} = opconstant0; + let Inst {20-16} = opconstant1; + let Inst {10-0} = opconstant2; + let Inst {15-11} = operand0; + let Inst {21} = operand1; +} + +class PPC32InstPattern45 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 1; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = 0; + let Arg2Type = 0; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<15> operand0; + + + let Inst {31-26} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {25-11} = operand0; +} + +class PPC32InstPattern46 opconstant0, bits<11> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 3; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = 0; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<2> operand2; + + + let Inst {31-23} = opconstant0; + let Inst {10-0} = opconstant1; + let Inst {20-16} = operand0; + let Inst {15-11} = operand1; + let Inst {22-21} = operand2; +} + +class PPC32InstPattern47 opconstant0, bits<6> opconstant1, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 4; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = OperandType3.Value; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + bits<5> operand3; + + + let Inst {31-26} = opconstant0; + let Inst {5-0} = opconstant1; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; + let Inst {15-11} = operand2; + let Inst {10-6} = operand3; +} + +class PPC32InstPattern48 opconstant0, bits<1> opconstant1, bits<6> opconstant2, bit ppc64, bit vmx> : PPC32Inst { + let Name = name; + let ArgCount = 4; + let PPC64 = ppc64; + let VMX =vmx; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType1.Value; + let Arg2Type = OperandType2.Value; + let Arg3Type = OperandType3.Value; + 
let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + bits<4> operand3; + + + let Inst {31-26} = opconstant0; + let Inst {10} = opconstant1; + let Inst {5-0} = opconstant2; + let Inst {25-21} = operand0; + let Inst {20-16} = operand1; + let Inst {15-11} = operand2; + let Inst {9-6} = operand3; +} + +class PPC32InstPatternPseudo : PPC32Inst { + let Name = name; + let ArgCount = 0; + let PPC64 = 0; + let VMX = 0; + + let Arg0Type = OperandType0.Value; + let Arg1Type = OperandType0.Value; + let Arg2Type = OperandType0.Value; + let Arg3Type = OperandType0.Value; + let Arg4Type = 0; + let PPC64 = 0; + let VMX = 0; + bits<5> operand0; + bits<5> operand1; + bits<5> operand2; + bits<4> operand3; + + + let Inst {31-0} = 0; +} + + +let isCall = 1 in + // All calls clobber the non-callee saved registers... + let Defs = [R0, R2, R3, R4, R5, R6, R7, R8, R9, R10] in { + def CALLpcrel : PPC32InstPattern6 <"bl", PCRelimm24, 18, 1, 0, 0>; + def CALLindirect : PPC32InstPattern3 <"bctrl", Imm5, Imm5, 19, 1057, 0, 0>; + } + +let isTerminator = 1, isReturn = 1 in + def BLR : PPC32InstPattern11 <"blr", Imm2, 160768, 32, 0, 0>; + +// Pseudo-instructions: +def PHI : PPC32InstPatternPseudo<"PHI", Pseudo>; // PHI node... +def ADJCALLSTACKDOWN : PPC32InstPatternPseudo<"ADJCALLSTACKDOWN", Pseudo>; +def ADJCALLSTACKUP : PPC32InstPatternPseudo<"ADJCALLSTACKUP", Pseudo>; +def MovePCtoLR : PPC32InstPatternPseudo<"MovePCtoLR", Pseudo>; + +def LOADLoAddr : PPC32InstPattern0 <"addi", Gpr, Gpr0, Simm16, 14, 0, 0>; +def LOADHiAddr : PPC32InstPattern0 <"addis", Gpr, Gpr0, Simm16, 15, 0, 0>; + +def ADDI : PPC32InstPattern0 <"addi", Gpr, Gpr0, Simm16, 14, 0, 0>; +def LI : PPC32InstPattern1 <"li", Gpr, Simm16, 14, 0, 0, 0>; +def ADDIS : PPC32InstPattern0 <"addis", Gpr, Gpr0, Simm16, 15, 0, 0>; +def LIS : PPC32InstPattern1 <"lis", Gpr, Simm16, 15, 0, 0, 0>; +def ADDIC : PPC32InstPattern0 <"addic", Gpr, Gpr, Simm16, 12, 0, 0>; +def ADDICo : PPC32InstPattern0 <"addic.", Gpr, Gpr, Simm16, 13, 0, 0>; +def ADD : PPC32InstPattern2 <"add", Gpr, Gpr, Gpr, 31, 532, 0, 0>; +def ADDo : PPC32InstPattern2 <"add.", Gpr, Gpr, Gpr, 31, 533, 0, 0>; +def ADDO : PPC32InstPattern2 <"addo", Gpr, Gpr, Gpr, 31, 532, 0, 0>; +def ADDOo : PPC32InstPattern2 <"addo.", Gpr, Gpr, Gpr, 31, 533, 0, 0>; +def ADDC : PPC32InstPattern2 <"addc", Gpr, Gpr, Gpr, 31, 20, 0, 0>; +def ADDCo : PPC32InstPattern2 <"addc.", Gpr, Gpr, Gpr, 31, 21, 0, 0>; +def ADDCO : PPC32InstPattern2 <"addco", Gpr, Gpr, Gpr, 31, 20, 0, 0>; +def ADDCOo : PPC32InstPattern2 <"addco.", Gpr, Gpr, Gpr, 31, 21, 0, 0>; +def ADDE : PPC32InstPattern2 <"adde", Gpr, Gpr, Gpr, 31, 276, 0, 0>; +def ADDEo : PPC32InstPattern2 <"adde.", Gpr, Gpr, Gpr, 31, 277, 0, 0>; +def ADDEO : PPC32InstPattern2 <"addeo", Gpr, Gpr, Gpr, 31, 276, 0, 0>; +def ADDEOo : PPC32InstPattern2 <"addeo.", Gpr, Gpr, Gpr, 31, 277, 0, 0>; +def ADDME : PPC32InstPattern3 <"addme", Gpr, Gpr, 31, 468, 0, 0>; +def ADDMEo : PPC32InstPattern3 <"addme.", Gpr, Gpr, 31, 469, 0, 0>; +def ADDMEO : PPC32InstPattern3 <"addmeo", Gpr, Gpr, 31, 1492, 0, 0>; +def ADDMEOo : PPC32InstPattern3 <"addmeo.", Gpr, Gpr, 31, 1493, 0, 0>; +def ADDZE : PPC32InstPattern3 <"addze", Gpr, Gpr, 31, 404, 0, 0>; +def ADDZEo : PPC32InstPattern3 <"addze.", Gpr, Gpr, 31, 405, 0, 0>; +def ADDZEO : PPC32InstPattern3 <"addzeo", Gpr, Gpr, 31, 1428, 0, 0>; +def ADDZEOo : PPC32InstPattern3 <"addzeo.", Gpr, Gpr, 31, 1429, 0, 0>; +def ANDIo : PPC32InstPattern4 <"andi.", Gpr, Gpr, Zimm16, 28, 0, 0>; +def ANDISo : 
PPC32InstPattern4 <"andis.", Gpr, Gpr, Zimm16, 29, 0, 0>; +def AND : PPC32InstPattern5 <"and", Gpr, Gpr, Gpr, 31, 56, 0, 0>; +def ANDo : PPC32InstPattern5 <"and.", Gpr, Gpr, Gpr, 31, 57, 0, 0>; +def ANDC : PPC32InstPattern5 <"andc", Gpr, Gpr, Gpr, 31, 120, 0, 0>; +def ANDCo : PPC32InstPattern5 <"andc.", Gpr, Gpr, Gpr, 31, 121, 0, 0>; +def B : PPC32InstPattern6 <"b", PCRelimm24, 18, 0, 0, 0>; +def BA : PPC32InstPattern6 <"ba", Imm24, 18, 0, 0, 0>; +def BL : PPC32InstPattern6 <"bl", PCRelimm24, 18, 1, 0, 0>; +def BLA : PPC32InstPattern6 <"bla", Imm24, 18, 1, 0, 0>; +def BC : PPC32InstPattern7 <"bc", Imm5, Imm5, PCRelimm14, 16, 0, 0, 0>; +def BCA : PPC32InstPattern7 <"bca", Imm5, Imm5, Imm14, 16, 0, 0, 0>; +def BCL : PPC32InstPattern7 <"bcl", Imm5, Imm5, PCRelimm14, 16, 1, 0, 0>; +def BCLA : PPC32InstPattern7 <"bcla", Imm5, Imm5, Imm14, 16, 1, 0, 0>; +def BCCTR : PPC32InstPattern8 <"bcctr", Imm5, Imm5, Imm2, 19, 0, 32, 0, 0>; +def BCCTRL : PPC32InstPattern8 <"bcctrl", Imm5, Imm5, Imm2, 19, 0, 33, 0, 0>; +def BCLR : PPC32InstPattern8 <"bclr", Imm5, Imm5, Imm2, 19, 0, 32, 0, 0>; +def BCLRL : PPC32InstPattern8 <"bclrl", Imm5, Imm5, Imm2, 19, 0, 33, 0, 0>; +def BT : PPC32InstPattern9 <"bt", Imm5, PCRelimm14, 524, 0, 0, 0>; +def BTL : PPC32InstPattern9 <"btl", Imm5, PCRelimm14, 524, 1, 0, 0>; +def BF : PPC32InstPattern9 <"bf", Imm5, PCRelimm14, 516, 0, 0, 0>; +def BFL : PPC32InstPattern9 <"bfl", Imm5, PCRelimm14, 516, 1, 0, 0>; +def BDNZ : PPC32InstPattern10 <"bdnz", PCRelimm14, 16896, 0, 0, 0>; +def BDNZL : PPC32InstPattern10 <"bdnzl", PCRelimm14, 16896, 1, 0, 0>; +def BDNZT : PPC32InstPattern9 <"bdnzt", Imm5, PCRelimm14, 520, 0, 0, 0>; +def BDNZTL : PPC32InstPattern9 <"bdnztl", Imm5, PCRelimm14, 520, 1, 0, 0>; +def BDNZF : PPC32InstPattern9 <"bdnzf", Imm5, PCRelimm14, 512, 0, 0, 0>; +def BDNZFL : PPC32InstPattern9 <"bdnzfl", Imm5, PCRelimm14, 512, 1, 0, 0>; +def BDZ : PPC32InstPattern10 <"bdz", PCRelimm14, 16960, 0, 0, 0>; +def BDZL : PPC32InstPattern10 <"bdzl", PCRelimm14, 16960, 1, 0, 0>; +def BDZT : PPC32InstPattern9 <"bdzt", Imm5, PCRelimm14, 522, 0, 0, 0>; +def BDZTL : PPC32InstPattern9 <"bdztl", Imm5, PCRelimm14, 522, 1, 0, 0>; +def BDZF : PPC32InstPattern9 <"bdzf", Imm5, PCRelimm14, 514, 0, 0, 0>; +def BDZFL : PPC32InstPattern9 <"bdzfl", Imm5, PCRelimm14, 514, 1, 0, 0>; +def BTA : PPC32InstPattern9 <"bta", Imm5, Imm14, 524, 0, 0, 0>; +def BTLA : PPC32InstPattern9 <"btla", Imm5, Imm14, 524, 1, 0, 0>; +def BFA : PPC32InstPattern9 <"bfa", Imm5, Imm14, 516, 0, 0, 0>; +def BFLA : PPC32InstPattern9 <"bfla", Imm5, Imm14, 516, 1, 0, 0>; +def BDNZA : PPC32InstPattern10 <"bdnza", Imm14, 16896, 0, 0, 0>; +def BDNZLA : PPC32InstPattern10 <"bdnzla", Imm14, 16896, 1, 0, 0>; +def BDNZTA : PPC32InstPattern9 <"bdnzta", Imm5, Imm14, 520, 0, 0, 0>; +def BDNZTLA : PPC32InstPattern9 <"bdnztla", Imm5, Imm14, 520, 1, 0, 0>; +def BDNZFA : PPC32InstPattern9 <"bdnzfa", Imm5, Imm14, 512, 0, 0, 0>; +def BDNZFLA : PPC32InstPattern9 <"bdnzfla", Imm5, Imm14, 512, 1, 0, 0>; +def BDZA : PPC32InstPattern10 <"bdza", Imm14, 16960, 0, 0, 0>; +def BDZLA : PPC32InstPattern10 <"bdzla", Imm14, 16960, 1, 0, 0>; +def BDZTA : PPC32InstPattern9 <"bdzta", Imm5, Imm14, 522, 0, 0, 0>; +def BDZTLA : PPC32InstPattern9 <"bdztla", Imm5, Imm14, 522, 1, 0, 0>; +def BDZFA : PPC32InstPattern9 <"bdzfa", Imm5, Imm14, 514, 0, 0, 0>; +def BDZFLA : PPC32InstPattern9 <"bdzfla", Imm5, Imm14, 514, 1, 0, 0>; +def BLRL : PPC32InstPattern11 <"blrl", Imm2, 160768, 33, 0, 0>; +def BTLR : PPC32InstPattern12 <"btlr", Imm5, Imm2, 620, 0, 32, 0, 0>; +def BTLRL 
: PPC32InstPattern12 <"btlrl", Imm5, Imm2, 620, 0, 33, 0, 0>; +def BFLR : PPC32InstPattern12 <"bflr", Imm5, Imm2, 612, 0, 32, 0, 0>; +def BFLRL : PPC32InstPattern12 <"bflrl", Imm5, Imm2, 612, 0, 33, 0, 0>; +def BDNZLR : PPC32InstPattern11 <"bdnzlr", Imm2, 159744, 32, 0, 0>; +def BDNZLRL : PPC32InstPattern11 <"bdnzlrl", Imm2, 159744, 33, 0, 0>; +def BDNZTLR : PPC32InstPattern12 <"bdnztlr", Imm5, Imm2, 616, 0, 32, 0, 0>; +def BDNZTLRL : PPC32InstPattern12 <"bdnztlrl", Imm5, Imm2, 616, 0, 33, 0, 0>; +def BDNZFLR : PPC32InstPattern12 <"bdnzflr", Imm5, Imm2, 608, 0, 32, 0, 0>; +def BDNZFLRL : PPC32InstPattern12 <"bdnzflrl", Imm5, Imm2, 608, 0, 33, 0, 0>; +def BDZLR : PPC32InstPattern11 <"bdzlr", Imm2, 160256, 32, 0, 0>; +def BDZLRL : PPC32InstPattern11 <"bdzlrl", Imm2, 160256, 33, 0, 0>; +def BDZTLR : PPC32InstPattern12 <"bdztlr", Imm5, Imm2, 618, 0, 32, 0, 0>; +def BDZTLRL : PPC32InstPattern12 <"bdztlrl", Imm5, Imm2, 618, 0, 33, 0, 0>; +def BDZFLR : PPC32InstPattern12 <"bdzflr", Imm5, Imm2, 610, 0, 32, 0, 0>; +def BDZFLRL : PPC32InstPattern12 <"bdzflrl", Imm5, Imm2, 610, 0, 33, 0, 0>; +def BCTR : PPC32InstPattern3 <"bctr", Imm5, Imm5, 19, 1056, 0, 0>; +def BCTRL : PPC32InstPattern3 <"bctrl", Imm5, Imm5, 19, 1057, 0, 0>; +def BTCTR : PPC32InstPattern12 <"btctr", Imm5, Imm2, 620, 0, 32, 0, 0>; +def BTCTRL : PPC32InstPattern12 <"btctrl", Imm5, Imm2, 620, 0, 33, 0, 0>; +def BFCTR : PPC32InstPattern12 <"bfctr", Imm5, Imm2, 612, 0, 32, 0, 0>; +def BFCTRL : PPC32InstPattern12 <"bfctrl", Imm5, Imm2, 612, 0, 33, 0, 0>; +def BLT : PPC32InstPattern9 <"blt", Crf, PCRelimm14, 524, 0, 0, 0>; +def BLTL : PPC32InstPattern9 <"bltl", Crf, PCRelimm14, 524, 1, 0, 0>; +def BLE : PPC32InstPattern9 <"ble", Crf, PCRelimm14, 516, 0, 0, 0>; +def BLEL : PPC32InstPattern9 <"blel", Crf, PCRelimm14, 516, 1, 0, 0>; +def BEQ : PPC32InstPattern9 <"beq", Crf, PCRelimm14, 524, 0, 0, 0>; +def BEQL : PPC32InstPattern9 <"beql", Crf, PCRelimm14, 524, 1, 0, 0>; +def BGE : PPC32InstPattern9 <"bge", Crf, PCRelimm14, 516, 0, 0, 0>; +def BGEL : PPC32InstPattern9 <"bgel", Crf, PCRelimm14, 516, 1, 0, 0>; +def BGT : PPC32InstPattern9 <"bgt", Crf, PCRelimm14, 524, 0, 0, 0>; +def BGTL : PPC32InstPattern9 <"bgtl", Crf, PCRelimm14, 524, 1, 0, 0>; +def BNL : PPC32InstPattern9 <"bnl", Crf, PCRelimm14, 516, 0, 0, 0>; +def BNLL : PPC32InstPattern9 <"bnll", Crf, PCRelimm14, 516, 1, 0, 0>; +def BNE : PPC32InstPattern9 <"bne", Crf, PCRelimm14, 516, 0, 0, 0>; +def BNEL : PPC32InstPattern9 <"bnel", Crf, PCRelimm14, 516, 1, 0, 0>; +def BNG : PPC32InstPattern9 <"bng", Crf, PCRelimm14, 516, 0, 0, 0>; +def BNGL : PPC32InstPattern9 <"bngl", Crf, PCRelimm14, 516, 1, 0, 0>; +def BSO : PPC32InstPattern9 <"bso", Crf, PCRelimm14, 524, 0, 0, 0>; +def BSOL : PPC32InstPattern9 <"bsol", Crf, PCRelimm14, 524, 1, 0, 0>; +def BNS : PPC32InstPattern9 <"bns", Crf, PCRelimm14, 516, 0, 0, 0>; +def BNSL : PPC32InstPattern9 <"bnsl", Crf, PCRelimm14, 516, 1, 0, 0>; +def BUN : PPC32InstPattern9 <"bun", Crf, PCRelimm14, 524, 0, 0, 0>; +def BUNL : PPC32InstPattern9 <"bunl", Crf, PCRelimm14, 524, 1, 0, 0>; +def BNU : PPC32InstPattern9 <"bnu", Crf, PCRelimm14, 516, 0, 0, 0>; +def BNUL : PPC32InstPattern9 <"bnul", Crf, PCRelimm14, 516, 1, 0, 0>; +def BLTA : PPC32InstPattern9 <"blta", Crf, Imm14, 524, 0, 0, 0>; +def BLTLA : PPC32InstPattern9 <"bltla", Crf, Imm14, 524, 1, 0, 0>; +def BLEA : PPC32InstPattern9 <"blea", Crf, Imm14, 516, 0, 0, 0>; +def BLELA : PPC32InstPattern9 <"blela", Crf, Imm14, 516, 1, 0, 0>; +def BEQA : PPC32InstPattern9 <"beqa", Crf, Imm14, 524, 0, 0, 0>; +def 
BEQLA : PPC32InstPattern9 <"beqla", Crf, Imm14, 524, 1, 0, 0>; +def BGEA : PPC32InstPattern9 <"bgea", Crf, Imm14, 516, 0, 0, 0>; +def BGELA : PPC32InstPattern9 <"bgela", Crf, Imm14, 516, 1, 0, 0>; +def BGTA : PPC32InstPattern9 <"bgta", Crf, Imm14, 524, 0, 0, 0>; +def BGTLA : PPC32InstPattern9 <"bgtla", Crf, Imm14, 524, 1, 0, 0>; +def BNLA : PPC32InstPattern9 <"bnla", Crf, Imm14, 516, 0, 0, 0>; +def BNLLA : PPC32InstPattern9 <"bnlla", Crf, Imm14, 516, 1, 0, 0>; +def BNEA : PPC32InstPattern9 <"bnea", Crf, Imm14, 516, 0, 0, 0>; +def BNELA : PPC32InstPattern9 <"bnela", Crf, Imm14, 516, 1, 0, 0>; +def BNGA : PPC32InstPattern9 <"bnga", Crf, Imm14, 516, 0, 0, 0>; +def BNGLA : PPC32InstPattern9 <"bngla", Crf, Imm14, 516, 1, 0, 0>; +def BSOA : PPC32InstPattern9 <"bsoa", Crf, Imm14, 524, 0, 0, 0>; +def BSOLA : PPC32InstPattern9 <"bsola", Crf, Imm14, 524, 1, 0, 0>; +def BNSA : PPC32InstPattern9 <"bnsa", Crf, Imm14, 516, 0, 0, 0>; +def BNSLA : PPC32InstPattern9 <"bnsla", Crf, Imm14, 516, 1, 0, 0>; +def BUNA : PPC32InstPattern9 <"buna", Crf, Imm14, 524, 0, 0, 0>; +def BUNLA : PPC32InstPattern9 <"bunla", Crf, Imm14, 524, 1, 0, 0>; +def BNUA : PPC32InstPattern9 <"bnua", Crf, Imm14, 516, 0, 0, 0>; +def BNULA : PPC32InstPattern9 <"bnula", Crf, Imm14, 516, 1, 0, 0>; +def BLTLR : PPC32InstPattern12 <"bltlr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BLTLRL : PPC32InstPattern12 <"bltlrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BLELR : PPC32InstPattern12 <"blelr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BLELRL : PPC32InstPattern12 <"blelrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BEQLR : PPC32InstPattern12 <"beqlr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BEQLRL : PPC32InstPattern12 <"beqlrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BGELR : PPC32InstPattern12 <"bgelr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BGELRL : PPC32InstPattern12 <"bgelrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BGTLR : PPC32InstPattern12 <"bgtlr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BGTLRL : PPC32InstPattern12 <"bgtlrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BNLLR : PPC32InstPattern12 <"bnllr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNLLRL : PPC32InstPattern12 <"bnllrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BNELR : PPC32InstPattern12 <"bnelr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNELRL : PPC32InstPattern12 <"bnelrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BNGLR : PPC32InstPattern12 <"bnglr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNGLRL : PPC32InstPattern12 <"bnglrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BSOLR : PPC32InstPattern12 <"bsolr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BSOLRL : PPC32InstPattern12 <"bsolrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BNSLR : PPC32InstPattern12 <"bnslr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNSLRL : PPC32InstPattern12 <"bnslrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BUNLR : PPC32InstPattern12 <"bunlr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BUNLRL : PPC32InstPattern12 <"bunlrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BNULR : PPC32InstPattern12 <"bnulr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNULRL : PPC32InstPattern12 <"bnulrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BLTCTR : PPC32InstPattern12 <"bltctr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BLTCTRL : PPC32InstPattern12 <"bltctrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BLECTR : PPC32InstPattern12 <"blectr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BLECTRL : PPC32InstPattern12 <"blectrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BEQCTR : PPC32InstPattern12 <"beqctr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BEQCTRL : PPC32InstPattern12 <"beqctrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BGECTR : PPC32InstPattern12 <"bgectr", Crf, Imm2, 612, 0, 32, 0, 0>; +def 
BGECTRL : PPC32InstPattern12 <"bgectrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BGTCTR : PPC32InstPattern12 <"bgtctr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BGTCTRL : PPC32InstPattern12 <"bgtctrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BNLCTR : PPC32InstPattern12 <"bnlctr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNLCTRL : PPC32InstPattern12 <"bnlctrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BNECTR : PPC32InstPattern12 <"bnectr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNECTRL : PPC32InstPattern12 <"bnectrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BNGCTR : PPC32InstPattern12 <"bngctr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNGCTRL : PPC32InstPattern12 <"bngctrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BSOCTR : PPC32InstPattern12 <"bsoctr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BSOCTRL : PPC32InstPattern12 <"bsoctrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BNSCTR : PPC32InstPattern12 <"bnsctr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNSCTRL : PPC32InstPattern12 <"bnsctrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def BUNCTR : PPC32InstPattern12 <"bunctr", Crf, Imm2, 620, 0, 32, 0, 0>; +def BUNCTRL : PPC32InstPattern12 <"bunctrl", Crf, Imm2, 620, 0, 33, 0, 0>; +def BNUCTR : PPC32InstPattern12 <"bnuctr", Crf, Imm2, 612, 0, 32, 0, 0>; +def BNUCTRL : PPC32InstPattern12 <"bnuctrl", Crf, Imm2, 612, 0, 33, 0, 0>; +def CMPI : PPC32InstPattern13 <"cmpi", Imm3, Imm1, Gpr, Simm16, 11, 0, 0, 0>; +def CMPWI : PPC32InstPattern14 <"cmpwi", Imm3, Gpr, Simm16, 11, 0, 0, 0>; +def CMPDI : PPC32InstPattern14 <"cmpdi", Imm3, Gpr, Simm16, 11, 1, 0, 0>; +def CMP : PPC32InstPattern15 <"cmp", Imm3, Imm1, Gpr, Gpr, 31, 0, 0, 0, 0>; +def CMPW : PPC32InstPattern16 <"cmpw", Imm3, Gpr, Gpr, 31, 0, 0, 0, 0>; +def CMPD : PPC32InstPattern16 <"cmpd", Imm3, Gpr, Gpr, 31, 1, 0, 0, 0>; +def CMPLI : PPC32InstPattern13 <"cmpli", Imm3, Imm1, Gpr, Zimm16, 10, 0, 0, 0>; +def CMPLWI : PPC32InstPattern14 <"cmplwi", Imm3, Gpr, Zimm16, 10, 0, 0, 0>; +def CMPLDI : PPC32InstPattern14 <"cmpldi", Imm3, Gpr, Zimm16, 10, 1, 0, 0>; +def CMPL : PPC32InstPattern15 <"cmpl", Imm3, Imm1, Gpr, Gpr, 31, 0, 64, 0, 0>; +def CMPLW : PPC32InstPattern16 <"cmplw", Imm3, Gpr, Gpr, 31, 0, 64, 0, 0>; +def CMPLD : PPC32InstPattern16 <"cmpld", Imm3, Gpr, Gpr, 31, 1, 64, 0, 0>; +def CNTLZW : PPC32InstPattern17 <"cntlzw", Gpr, Gpr, 31, 52, 0, 0>; +def CNTLZWo : PPC32InstPattern17 <"cntlzw.", Gpr, Gpr, 31, 53, 0, 0>; +def CNTLZD : PPC32InstPattern17 <"cntlzd", Gpr, Gpr, 31, 116, 1, 0>; +def CNTLZDo : PPC32InstPattern17 <"cntlzd.", Gpr, Gpr, 31, 117, 1, 0>; +def CRAND : PPC32InstPattern2 <"crand", Imm5, Imm5, Imm5, 19, 514, 0, 0>; +def CRANDC : PPC32InstPattern2 <"crandc", Imm5, Imm5, Imm5, 19, 258, 0, 0>; +def CREQV : PPC32InstPattern2 <"creqv", Imm5, Imm5, Imm5, 19, 578, 0, 0>; +def CRNAND : PPC32InstPattern2 <"crnand", Imm5, Imm5, Imm5, 19, 450, 0, 0>; +def CRNOR : PPC32InstPattern2 <"crnor", Imm5, Imm5, Imm5, 19, 66, 0, 0>; +def CROR : PPC32InstPattern2 <"cror", Imm5, Imm5, Imm5, 19, 898, 0, 0>; +def CRORC : PPC32InstPattern2 <"crorc", Imm5, Imm5, Imm5, 19, 834, 0, 0>; +def CRXOR : PPC32InstPattern2 <"crxor", Imm5, Imm5, Imm5, 19, 386, 0, 0>; +def DIVD : PPC32InstPattern2 <"divd", Gpr, Gpr, Gpr, 31, 978, 1, 0>; +def DIVDo : PPC32InstPattern2 <"divd.", Gpr, Gpr, Gpr, 31, 979, 1, 0>; +def DIVDO : PPC32InstPattern2 <"divdo", Gpr, Gpr, Gpr, 31, 978, 1, 0>; +def DIVDOo : PPC32InstPattern2 <"divdo.", Gpr, Gpr, Gpr, 31, 979, 1, 0>; +def DIVDU : PPC32InstPattern2 <"divdu", Gpr, Gpr, Gpr, 31, 914, 1, 0>; +def DIVDUo : PPC32InstPattern2 <"divdu.", Gpr, Gpr, Gpr, 31, 915, 1, 0>; +def DIVDUO : PPC32InstPattern2 <"divduo", Gpr, 
Gpr, Gpr, 31, 914, 1, 0>; +def DIVDUOo : PPC32InstPattern2 <"divduo.", Gpr, Gpr, Gpr, 31, 915, 1, 0>; +def DIVW : PPC32InstPattern2 <"divw", Gpr, Gpr, Gpr, 31, 982, 0, 0>; +def DIVWo : PPC32InstPattern2 <"divw.", Gpr, Gpr, Gpr, 31, 983, 0, 0>; +def DIVWO : PPC32InstPattern2 <"divwo", Gpr, Gpr, Gpr, 31, 982, 0, 0>; +def DIVWOo : PPC32InstPattern2 <"divwo.", Gpr, Gpr, Gpr, 31, 983, 0, 0>; +def DIVWU : PPC32InstPattern2 <"divwu", Gpr, Gpr, Gpr, 31, 918, 0, 0>; +def DIVWUo : PPC32InstPattern2 <"divwu.", Gpr, Gpr, Gpr, 31, 919, 0, 0>; +def DIVWUO : PPC32InstPattern2 <"divwuo", Gpr, Gpr, Gpr, 31, 918, 0, 0>; +def DIVWUOo : PPC32InstPattern2 <"divwuo.", Gpr, Gpr, Gpr, 31, 919, 0, 0>; +def EQV : PPC32InstPattern5 <"eqv", Gpr, Gpr, Gpr, 31, 568, 0, 0>; +def EQVo : PPC32InstPattern5 <"eqv.", Gpr, Gpr, Gpr, 31, 569, 0, 0>; +def EXTSB : PPC32InstPattern17 <"extsb", Gpr, Gpr, 31, 1908, 0, 0>; +def EXTSBo : PPC32InstPattern17 <"extsb.", Gpr, Gpr, 31, 1909, 0, 0>; +def EXTSH : PPC32InstPattern17 <"extsh", Gpr, Gpr, 31, 1844, 0, 0>; +def EXTSHo : PPC32InstPattern17 <"extsh.", Gpr, Gpr, 31, 1845, 0, 0>; +def EXTSW : PPC32InstPattern17 <"extsw", Gpr, Gpr, 31, 1972, 1, 0>; +def EXTSWo : PPC32InstPattern17 <"extsw.", Gpr, Gpr, 31, 1973, 1, 0>; +def FADD : PPC32InstPattern2 <"fadd", Fpr, Fpr, Fpr, 63, 42, 0, 0>; +def FADDo : PPC32InstPattern2 <"fadd.", Fpr, Fpr, Fpr, 63, 43, 0, 0>; +def FADDS : PPC32InstPattern2 <"fadds", Fpr, Fpr, Fpr, 59, 42, 0, 0>; +def FADDSo : PPC32InstPattern2 <"fadds.", Fpr, Fpr, Fpr, 59, 43, 0, 0>; +def FSUB : PPC32InstPattern2 <"fsub", Fpr, Fpr, Fpr, 63, 40, 0, 0>; +def FSUBo : PPC32InstPattern2 <"fsub.", Fpr, Fpr, Fpr, 63, 41, 0, 0>; +def FSUBS : PPC32InstPattern2 <"fsubs", Fpr, Fpr, Fpr, 59, 40, 0, 0>; +def FSUBSo : PPC32InstPattern2 <"fsubs.", Fpr, Fpr, Fpr, 59, 41, 0, 0>; +def FMUL : PPC32InstPattern18 <"fmul", Fpr, Fpr, Fpr, 63, 0, 18, 0, 0>; +def FMULo : PPC32InstPattern18 <"fmul.", Fpr, Fpr, Fpr, 63, 0, 19, 0, 0>; +def FMULS : PPC32InstPattern18 <"fmuls", Fpr, Fpr, Fpr, 59, 0, 18, 0, 0>; +def FMULSo : PPC32InstPattern18 <"fmuls.", Fpr, Fpr, Fpr, 59, 0, 19, 0, 0>; +def FDIV : PPC32InstPattern2 <"fdiv", Fpr, Fpr, Fpr, 63, 36, 0, 0>; +def FDIVo : PPC32InstPattern2 <"fdiv.", Fpr, Fpr, Fpr, 63, 37, 0, 0>; +def FDIVS : PPC32InstPattern2 <"fdivs", Fpr, Fpr, Fpr, 59, 36, 0, 0>; +def FDIVSo : PPC32InstPattern2 <"fdivs.", Fpr, Fpr, Fpr, 59, 37, 0, 0>; +def FMADD : PPC32InstPattern19 <"fmadd", Fpr, Fpr, Fpr, Fpr, 63, 26, 0, 0>; +def FMADDo : PPC32InstPattern19 <"fmadd.", Fpr, Fpr, Fpr, Fpr, 63, 27, 0, 0>; +def FMADDS : PPC32InstPattern19 <"fmadds", Fpr, Fpr, Fpr, Fpr, 59, 26, 0, 0>; +def FMADDSo : PPC32InstPattern19 <"fmadds.", Fpr, Fpr, Fpr, Fpr, 59, 27, 0, 0>; +def FMSUB : PPC32InstPattern19 <"fmsub", Fpr, Fpr, Fpr, Fpr, 63, 24, 0, 0>; +def FMSUBo : PPC32InstPattern19 <"fmsub.", Fpr, Fpr, Fpr, Fpr, 63, 25, 0, 0>; +def FMSUBS : PPC32InstPattern19 <"fmsubs", Fpr, Fpr, Fpr, Fpr, 59, 24, 0, 0>; +def FMSUBSo : PPC32InstPattern19 <"fmsubs.", Fpr, Fpr, Fpr, Fpr, 59, 25, 0, 0>; +def FNMADD : PPC32InstPattern19 <"fnmadd", Fpr, Fpr, Fpr, Fpr, 63, 30, 0, 0>; +def FNMADDo : PPC32InstPattern19 <"fnmadd.", Fpr, Fpr, Fpr, Fpr, 63, 31, 0, 0>; +def FNMADDS : PPC32InstPattern19 <"fnmadds", Fpr, Fpr, Fpr, Fpr, 59, 30, 0, 0>; +def FNMADDSo : PPC32InstPattern19 <"fnmadds.", Fpr, Fpr, Fpr, Fpr, 59, 31, 0, 0>; +def FNMSUB : PPC32InstPattern19 <"fnmsub", Fpr, Fpr, Fpr, Fpr, 63, 28, 0, 0>; +def FNMSUBo : PPC32InstPattern19 <"fnmsub.", Fpr, Fpr, Fpr, Fpr, 63, 29, 0, 0>; +def FNMSUBS : PPC32InstPattern19 
<"fnmsubs", Fpr, Fpr, Fpr, Fpr, 59, 28, 0, 0>; +def FNMSUBSo : PPC32InstPattern19 <"fnmsubs.", Fpr, Fpr, Fpr, Fpr, 59, 29, 0, 0>; +def FMR : PPC32InstPattern20 <"fmr", Fpr, Fpr, 63, 0, 144, 0, 0>; +def FMRo : PPC32InstPattern20 <"fmr.", Fpr, Fpr, 63, 0, 145, 0, 0>; +def FABS : PPC32InstPattern20 <"fabs", Fpr, Fpr, 63, 0, 528, 0, 0>; +def FABSo : PPC32InstPattern20 <"fabs.", Fpr, Fpr, 63, 0, 529, 0, 0>; +def FNEG : PPC32InstPattern20 <"fneg", Fpr, Fpr, 63, 0, 80, 0, 0>; +def FNEGo : PPC32InstPattern20 <"fneg.", Fpr, Fpr, 63, 0, 81, 0, 0>; +def FNABS : PPC32InstPattern20 <"fnabs", Fpr, Fpr, 63, 0, 272, 0, 0>; +def FNABSo : PPC32InstPattern20 <"fnabs.", Fpr, Fpr, 63, 0, 273, 0, 0>; +def FRES : PPC32InstPattern20 <"fres", Fpr, Fpr, 59, 0, 48, 0, 0>; +def FRESo : PPC32InstPattern20 <"fres.", Fpr, Fpr, 59, 0, 49, 0, 0>; +def FRSP : PPC32InstPattern20 <"frsp", Fpr, Fpr, 63, 0, 24, 0, 0>; +def FRSPo : PPC32InstPattern20 <"frsp.", Fpr, Fpr, 63, 0, 25, 0, 0>; +def FRSQRTE : PPC32InstPattern20 <"frsqrte", Fpr, Fpr, 63, 0, 52, 0, 0>; +def FRSQRTEo : PPC32InstPattern20 <"frsqrte.", Fpr, Fpr, 63, 0, 53, 0, 0>; +def FSEL : PPC32InstPattern19 <"fsel", Fpr, Fpr, Fpr, Fpr, 63, 14, 0, 0>; +def FSELo : PPC32InstPattern19 <"fsel.", Fpr, Fpr, Fpr, Fpr, 63, 15, 0, 0>; +def FSQRT : PPC32InstPattern20 <"fsqrt", Fpr, Fpr, 63, 0, 44, 0, 0>; +def FSQRTo : PPC32InstPattern20 <"fsqrt.", Fpr, Fpr, 63, 0, 45, 0, 0>; +def FSQRTS : PPC32InstPattern20 <"fsqrts", Fpr, Fpr, 59, 0, 44, 0, 0>; +def FSQRTSo : PPC32InstPattern20 <"fsqrts.", Fpr, Fpr, 59, 0, 45, 0, 0>; +def FCTID : PPC32InstPattern20 <"fctid", Fpr, Fpr, 63, 0, 604, 1, 0>; +def FCTIDo : PPC32InstPattern20 <"fctid.", Fpr, Fpr, 63, 0, 605, 1, 0>; +def FCTIDZ : PPC32InstPattern20 <"fctidz", Fpr, Fpr, 63, 0, 606, 1, 0>; +def FCTIDZo : PPC32InstPattern20 <"fctidz.", Fpr, Fpr, 63, 0, 607, 1, 0>; +def FCTIW : PPC32InstPattern20 <"fctiw", Fpr, Fpr, 63, 0, 28, 0, 0>; +def FCTIWo : PPC32InstPattern20 <"fctiw.", Fpr, Fpr, 63, 0, 29, 0, 0>; +def FCTIWZ : PPC32InstPattern20 <"fctiwz", Fpr, Fpr, 63, 0, 30, 0, 0>; +def FCTIWZo : PPC32InstPattern20 <"fctiwz.", Fpr, Fpr, 63, 0, 31, 0, 0>; +def FCFID : PPC32InstPattern20 <"fcfid", Fpr, Fpr, 63, 0, 668, 1, 0>; +def FCFIDo : PPC32InstPattern20 <"fcfid.", Fpr, Fpr, 63, 0, 669, 1, 0>; +def FCMPU : PPC32InstPattern16 <"fcmpu", Imm3, Fpr, Fpr, 63, 0, 0, 0, 0>; +def FCMPO : PPC32InstPattern16 <"fcmpo", Imm3, Fpr, Fpr, 63, 0, 64, 0, 0>; +def MFFS : PPC32InstPattern21 <"mffs", Fpr, 63, 1166, 0, 0>; +def MFFSo : PPC32InstPattern21 <"mffs.", Fpr, 63, 1167, 0, 0>; +def MCRFS : PPC32InstPattern22 <"mcrfs", Imm3, Imm5, 63, 128, 0, 0>; +def MTFSFI : PPC32InstPattern23 <"mtfsfi", Imm3, Imm4, 63, 0, 268, 0, 0>; +def MTFSFIo : PPC32InstPattern23 <"mtfsfi.", Imm3, Imm4, 63, 0, 269, 0, 0>; +def MTFSF : PPC32InstPattern24 <"mtfsf", Imm8, Fpr, 126, 0, 398, 0, 0>; +def MTFSFo : PPC32InstPattern24 <"mtfsf.", Imm8, Fpr, 126, 0, 399, 0, 0>; +def MTFSB0 : PPC32InstPattern21 <"mtfsb0", Imm5, 63, 140, 0, 0>; +def MTFSB0o : PPC32InstPattern21 <"mtfsb0.", Imm5, 63, 141, 0, 0>; +def MTFSB1 : PPC32InstPattern21 <"mtfsb1", Imm5, 63, 76, 0, 0>; +def MTFSB1o : PPC32InstPattern21 <"mtfsb1.", Imm5, 63, 77, 0, 0>; +def LBZ : PPC32InstPattern25 <"lbz", Gpr, Disimm16, Gpr0, 34, 0, 0>; +def LBZX : PPC32InstPattern2 <"lbzx", Gpr, Gpr0, Gpr, 31, 174, 0, 0>; +def LBZU : PPC32InstPattern25 <"lbzu", Gpr, Disimm16, Gpr0, 35, 0, 0>; +def LBZUX : PPC32InstPattern2 <"lbzux", Gpr, Gpr, Gpr, 31, 238, 0, 0>; +def LHZ : PPC32InstPattern25 <"lhz", Gpr, Disimm16, Gpr0, 40, 0, 0>; +def 
LHZX : PPC32InstPattern2 <"lhzx", Gpr, Gpr0, Gpr, 31, 558, 0, 0>; +def LHZU : PPC32InstPattern25 <"lhzu", Gpr, Disimm16, Gpr0, 41, 0, 0>; +def LHZUX : PPC32InstPattern2 <"lhzux", Gpr, Gpr, Gpr, 31, 622, 0, 0>; +def LHA : PPC32InstPattern25 <"lha", Gpr, Disimm16, Gpr0, 42, 0, 0>; +def LHAX : PPC32InstPattern2 <"lhax", Gpr, Gpr0, Gpr, 31, 686, 0, 0>; +def LHAU : PPC32InstPattern25 <"lhau", Gpr, Disimm16, Gpr, 43, 0, 0>; +def LHAUX : PPC32InstPattern2 <"lhaux", Gpr, Gpr, Gpr, 31, 750, 0, 0>; +def LWZ : PPC32InstPattern25 <"lwz", Gpr, Disimm16, Gpr0, 32, 0, 0>; +def LWZX : PPC32InstPattern2 <"lwzx", Gpr, Gpr0, Gpr, 31, 46, 0, 0>; +def LWZU : PPC32InstPattern25 <"lwzu", Gpr, Disimm16, Gpr, 33, 0, 0>; +def LWZUX : PPC32InstPattern2 <"lwzux", Gpr, Gpr, Gpr, 31, 110, 0, 0>; +def LWA : PPC32InstPattern26 <"lwa", Gpr, Disimm14, Gpr0, 58, 0, 1, 0>; +def LWAX : PPC32InstPattern2 <"lwax", Gpr, Gpr0, Gpr, 31, 682, 1, 0>; +def LWAUX : PPC32InstPattern2 <"lwaux", Gpr, Gpr, Gpr, 31, 746, 1, 0>; +def LD : PPC32InstPattern26 <"ld", Gpr, Disimm14, Gpr0, 58, 0, 1, 0>; +def LDX : PPC32InstPattern2 <"ldx", Gpr, Gpr0, Gpr, 31, 42, 1, 0>; +def LDU : PPC32InstPattern26 <"ldu", Gpr, Disimm14, Gpr, 58, 1, 1, 0>; +def LDUX : PPC32InstPattern2 <"ldux", Gpr, Gpr, Gpr, 31, 106, 1, 0>; +def LMW : PPC32InstPattern25 <"lmw", Gpr, Disimm16, Gpr0, 46, 0, 0>; +def STMW : PPC32InstPattern25 <"stmw", Gpr, Disimm16, Gpr0, 47, 0, 0>; +def LHBRX : PPC32InstPattern2 <"lhbrx", Gpr, Gpr0, Gpr, 31, 556, 0, 0>; +def LWBRX : PPC32InstPattern2 <"lwbrx", Gpr, Gpr0, Gpr, 31, 44, 0, 0>; +def LSWX : PPC32InstPattern2 <"lswx", Gpr, Gpr0, Gpr, 31, 42, 0, 0>; +def LWARX : PPC32InstPattern2 <"lwarx", Gpr, Gpr0, Gpr, 31, 40, 0, 0>; +def LDARX : PPC32InstPattern2 <"ldarx", Gpr, Gpr0, Gpr, 31, 168, 1, 0>; +def LSWI : PPC32InstPattern2 <"lswi", Gpr, Gpr0, Imm5, 31, 170, 0, 0>; +def LFS : PPC32InstPattern25 <"lfs", Fpr, Disimm16, Gpr0, 48, 0, 0>; +def LFSU : PPC32InstPattern25 <"lfsu", Fpr, Disimm16, Gpr, 49, 0, 0>; +def LFSX : PPC32InstPattern2 <"lfsx", Fpr, Gpr0, Gpr, 31, 46, 0, 0>; +def LFSUX : PPC32InstPattern2 <"lfsux", Fpr, Gpr, Gpr, 31, 110, 0, 0>; +def LFD : PPC32InstPattern25 <"lfd", Fpr, Disimm16, Gpr0, 50, 0, 0>; +def LFDU : PPC32InstPattern25 <"lfdu", Fpr, Disimm16, Gpr, 51, 0, 0>; +def LFDX : PPC32InstPattern2 <"lfdx", Fpr, Gpr0, Gpr, 31, 174, 0, 0>; +def LFDUX : PPC32InstPattern2 <"lfdux", Fpr, Gpr, Gpr, 31, 238, 0, 0>; +def LA : PPC32InstPattern25 <"la", Gpr, Disimm16, Gpr0, 14, 0, 0>; +def MCRF : PPC32InstPattern27 <"mcrf", Imm3, Imm3, 19, 0, 0, 0, 0>; +def MFSPR : PPC32InstPattern28 <"mfspr", Gpr, Spr, 31, 678, 0, 0>; +def MTSPR : PPC32InstPattern29 <"mtspr", Spr, Gpr, 31, 934, 0, 0>; +def MTCRF : PPC32InstPattern30 <"mtcrf", Imm8, Gpr, 31, 0, 288, 0, 0>; +def MCRXR : PPC32InstPattern31 <"mcrxr", Imm3, 31, 1024, 0, 0>; +def MFCR : PPC32InstPattern32 <"mfcr", Gpr, Imm8, 31, 0, 38, 0, 0>; +def MFXER : PPC32InstPattern21 <"mfxer", Gpr, 31, 66214, 0, 0>; +def MFLR : PPC32InstPattern21 <"mflr", Gpr, 31, 524966, 0, 0>; +def MFCTR : PPC32InstPattern21 <"mfctr", Gpr, 31, 590502, 0, 0>; +def MTXER : PPC32InstPattern21 <"mtxer", Gpr, 31, 66470, 0, 0>; +def MTLR : PPC32InstPattern21 <"mtlr", Gpr, 31, 525222, 0, 0>; +def MTCTR : PPC32InstPattern21 <"mtctr", Gpr, 31, 590758, 0, 0>; +def MFMQ : PPC32InstPattern21 <"mfmq", Gpr, 31, 678, 0, 0>; +def MFRTCL : PPC32InstPattern21 <"mfrtcl", Gpr, 31, 328358, 0, 0>; +def MFRTCU : PPC32InstPattern21 <"mfrtcu", Gpr, 31, 262822, 0, 0>; +def MTMQ : PPC32InstPattern21 <"mtmq", Gpr, 31, 934, 0, 0>; +def MTRTCL 
: PPC32InstPattern21 <"mtrtcl", Gpr, 31, 328614, 0, 0>; +def MTRTCU : PPC32InstPattern21 <"mtrtcu", Gpr, 31, 263078, 0, 0>; +def MULLW : PPC32InstPattern2 <"mullw", Gpr, Gpr, Gpr, 31, 470, 0, 0>; +def MULLWo : PPC32InstPattern2 <"mullw.", Gpr, Gpr, Gpr, 31, 471, 0, 0>; +def MULLWO : PPC32InstPattern2 <"mullwo", Gpr, Gpr, Gpr, 31, 470, 0, 0>; +def MULLWOo : PPC32InstPattern2 <"mullwo.", Gpr, Gpr, Gpr, 31, 471, 0, 0>; +def MULHD : PPC32InstPattern2 <"mulhd", Gpr, Gpr, Gpr, 31, 146, 1, 0>; +def MULHDo : PPC32InstPattern2 <"mulhd.", Gpr, Gpr, Gpr, 31, 147, 1, 0>; +def MULHW : PPC32InstPattern2 <"mulhw", Gpr, Gpr, Gpr, 31, 150, 0, 0>; +def MULHWo : PPC32InstPattern2 <"mulhw.", Gpr, Gpr, Gpr, 31, 151, 0, 0>; +def MULHDU : PPC32InstPattern2 <"mulhdu", Gpr, Gpr, Gpr, 31, 18, 1, 0>; +def MULHDUo : PPC32InstPattern2 <"mulhdu.", Gpr, Gpr, Gpr, 31, 19, 1, 0>; +def MULHWU : PPC32InstPattern2 <"mulhwu", Gpr, Gpr, Gpr, 31, 22, 0, 0>; +def MULHWUo : PPC32InstPattern2 <"mulhwu.", Gpr, Gpr, Gpr, 31, 23, 0, 0>; +def MULLD : PPC32InstPattern2 <"mulld", Gpr, Gpr, Gpr, 31, 466, 1, 0>; +def MULLDo : PPC32InstPattern2 <"mulld.", Gpr, Gpr, Gpr, 31, 467, 1, 0>; +def MULLDO : PPC32InstPattern2 <"mulldo", Gpr, Gpr, Gpr, 31, 466, 1, 0>; +def MULLDOo : PPC32InstPattern2 <"mulldo.", Gpr, Gpr, Gpr, 31, 467, 1, 0>; +def NAND : PPC32InstPattern5 <"nand", Gpr, Gpr, Gpr, 31, 952, 0, 0>; +def NANDo : PPC32InstPattern5 <"nand.", Gpr, Gpr, Gpr, 31, 953, 0, 0>; +def NEG : PPC32InstPattern3 <"neg", Gpr, Gpr, 31, 208, 0, 0>; +def NEGo : PPC32InstPattern3 <"neg.", Gpr, Gpr, 31, 209, 0, 0>; +def NEGO : PPC32InstPattern3 <"nego", Gpr, Gpr, 31, 1232, 0, 0>; +def NEGOo : PPC32InstPattern3 <"nego.", Gpr, Gpr, 31, 1233, 0, 0>; +def NOR : PPC32InstPattern5 <"nor", Gpr, Gpr, Gpr, 31, 248, 0, 0>; +def NORo : PPC32InstPattern5 <"nor.", Gpr, Gpr, Gpr, 31, 249, 0, 0>; +def NOP : PPC32InstPattern33 <"nop", 1610612736, 0, 0>; +def ORI : PPC32InstPattern4 <"ori", Gpr, Gpr, Zimm16, 24, 0, 0>; +def ORIS : PPC32InstPattern4 <"oris", Gpr, Gpr, Zimm16, 25, 0, 0>; +def OR : PPC32InstPattern5 <"or", Gpr, Gpr, Gpr, 31, 888, 0, 0>; +def ORo : PPC32InstPattern5 <"or.", Gpr, Gpr, Gpr, 31, 889, 0, 0>; +def ORC : PPC32InstPattern5 <"orc", Gpr, Gpr, Gpr, 31, 824, 0, 0>; +def ORCo : PPC32InstPattern5 <"orc.", Gpr, Gpr, Gpr, 31, 825, 0, 0>; +def RLDICL : PPC32InstPattern17 <"rldicl", Gpr, Gpr, 30, 0, 1, 0>; +def RLDICLo : PPC32InstPattern17 <"rldicl.", Gpr, Gpr, 30, 1, 1, 0>; +def RLDICR : PPC32InstPattern17 <"rldicr", Gpr, Gpr, 30, 4, 1, 0>; +def RLDICRo : PPC32InstPattern17 <"rldicr.", Gpr, Gpr, 30, 5, 1, 0>; +def RLDIC : PPC32InstPattern17 <"rldic", Gpr, Gpr, 30, 8, 1, 0>; +def RLDICo : PPC32InstPattern17 <"rldic.", Gpr, Gpr, 30, 9, 1, 0>; +def RLDIMI : PPC32InstPattern17 <"rldimi", Gpr, Gpr, 30, 12, 1, 0>; +def RLDIMIo : PPC32InstPattern17 <"rldimi.", Gpr, Gpr, 30, 13, 1, 0>; +def RLDCL : PPC32InstPattern5 <"rldcl", Gpr, Gpr, Gpr, 30, 16, 1, 0>; +def RLDCLo : PPC32InstPattern5 <"rldcl.", Gpr, Gpr, Gpr, 30, 17, 1, 0>; +def RLDCR : PPC32InstPattern5 <"rldcr", Gpr, Gpr, Gpr, 30, 18, 1, 0>; +def RLDCRo : PPC32InstPattern5 <"rldcr.", Gpr, Gpr, Gpr, 30, 19, 1, 0>; +def RLWINM : PPC32InstPattern34 <"rlwinm", Gpr, Gpr, Imm5, Imm5, Imm5, 21, 0, 0, 0>; +def RLWINMo : PPC32InstPattern34 <"rlwinm.", Gpr, Gpr, Imm5, Imm5, Imm5, 21, 0, 0, 0>; +def RLWNM : PPC32InstPattern34 <"rlwnm", Gpr, Gpr, Gpr, Imm5, Imm5, 23, 0, 0, 0>; +def RLWNMo : PPC32InstPattern34 <"rlwnm.", Gpr, Gpr, Gpr, Imm5, Imm5, 23, 0, 0, 0>; +def RLWIMI : PPC32InstPattern34 <"rlwimi", Gpr, Gpr, Imm5, Imm5, 
Imm5, 20, 0, 0, 0>; +def RLWIMIo : PPC32InstPattern34 <"rlwimi.", Gpr, Gpr, Imm5, Imm5, Imm5, 20, 0, 0, 0>; +def SC : PPC32InstPattern33 <"sc", 1140850690, 0, 0>; +def RFID : PPC32InstPattern33 <"rfid", 1275068452, 1, 0>; +def SLW : PPC32InstPattern5 <"slw", Gpr, Gpr, Gpr, 31, 48, 0, 0>; +def SLWo : PPC32InstPattern5 <"slw.", Gpr, Gpr, Gpr, 31, 49, 0, 0>; +def SLD : PPC32InstPattern5 <"sld", Gpr, Gpr, Gpr, 31, 54, 1, 0>; +def SLDo : PPC32InstPattern5 <"sld.", Gpr, Gpr, Gpr, 31, 55, 1, 0>; +def SRW : PPC32InstPattern5 <"srw", Gpr, Gpr, Gpr, 31, 48, 0, 0>; +def SRWo : PPC32InstPattern5 <"srw.", Gpr, Gpr, Gpr, 31, 49, 0, 0>; +def SRD : PPC32InstPattern5 <"srd", Gpr, Gpr, Gpr, 31, 54, 1, 0>; +def SRDo : PPC32InstPattern5 <"srd.", Gpr, Gpr, Gpr, 31, 55, 1, 0>; +def SRAWI : PPC32InstPattern5 <"srawi", Gpr, Gpr, Imm5, 31, 624, 0, 0>; +def SRAWIo : PPC32InstPattern5 <"srawi.", Gpr, Gpr, Imm5, 31, 625, 0, 0>; +def SRADI : PPC32InstPattern17 <"sradi", Gpr, Gpr, 31, 1652, 1, 0>; +def SRADIo : PPC32InstPattern17 <"sradi.", Gpr, Gpr, 31, 1653, 1, 0>; +def SRAW : PPC32InstPattern5 <"sraw", Gpr, Gpr, Gpr, 31, 560, 0, 0>; +def SRAWo : PPC32InstPattern5 <"sraw.", Gpr, Gpr, Gpr, 31, 561, 0, 0>; +def SRAD : PPC32InstPattern5 <"srad", Gpr, Gpr, Gpr, 31, 564, 1, 0>; +def SRADo : PPC32InstPattern5 <"srad.", Gpr, Gpr, Gpr, 31, 565, 1, 0>; +def STB : PPC32InstPattern25 <"stb", Gpr, Disimm16, Gpr0, 38, 0, 0>; +def STBU : PPC32InstPattern25 <"stbu", Gpr, Disimm16, Gpr, 39, 0, 0>; +def STBX : PPC32InstPattern2 <"stbx", Gpr, Gpr0, Gpr, 31, 430, 0, 0>; +def STBUX : PPC32InstPattern2 <"stbux", Gpr, Gpr, Gpr, 31, 494, 0, 0>; +def STH : PPC32InstPattern25 <"sth", Gpr, Disimm16, Gpr0, 44, 0, 0>; +def STHU : PPC32InstPattern25 <"sthu", Gpr, Disimm16, Gpr, 45, 0, 0>; +def STHX : PPC32InstPattern2 <"sthx", Gpr, Gpr0, Gpr, 31, 814, 0, 0>; +def STHUX : PPC32InstPattern2 <"sthux", Gpr, Gpr, Gpr, 31, 878, 0, 0>; +def STW : PPC32InstPattern25 <"stw", Gpr, Disimm16, Gpr0, 36, 0, 0>; +def STWU : PPC32InstPattern25 <"stwu", Gpr, Disimm16, Gpr, 37, 0, 0>; +def STWX : PPC32InstPattern2 <"stwx", Gpr, Gpr0, Gpr, 31, 302, 0, 0>; +def STWUX : PPC32InstPattern2 <"stwux", Gpr, Gpr, Gpr, 31, 366, 0, 0>; +def STD : PPC32InstPattern26 <"std", Gpr, Disimm14, Gpr0, 62, 0, 1, 0>; +def STDU : PPC32InstPattern26 <"stdu", Gpr, Disimm14, Gpr, 62, 1, 1, 0>; +def STDX : PPC32InstPattern2 <"stdx", Gpr, Gpr0, Gpr, 31, 298, 1, 0>; +def STDUX : PPC32InstPattern2 <"stdux", Gpr, Gpr, Gpr, 31, 362, 1, 0>; +def STHBRX : PPC32InstPattern2 <"sthbrx", Gpr, Gpr0, Gpr, 31, 812, 0, 0>; +def STWBRX : PPC32InstPattern2 <"stwbrx", Gpr, Gpr0, Gpr, 31, 300, 0, 0>; +def STSWX : PPC32InstPattern2 <"stswx", Gpr, Gpr0, Gpr, 31, 298, 0, 0>; +def STWCXo : PPC32InstPattern2 <"stwcx.", Gpr, Gpr0, Gpr, 31, 301, 0, 0>; +def STDCXo : PPC32InstPattern2 <"stdcx.", Gpr, Gpr0, Gpr, 31, 429, 1, 0>; +def STSWI : PPC32InstPattern2 <"stswi", Gpr, Gpr0, Imm5, 31, 426, 0, 0>; +def STFIWX : PPC32InstPattern2 <"stfiwx", Fpr, Gpr0, Gpr, 31, 942, 0, 0>; +def STFS : PPC32InstPattern25 <"stfs", Fpr, Disimm16, Gpr0, 52, 0, 0>; +def STFSU : PPC32InstPattern25 <"stfsu", Fpr, Disimm16, Gpr, 53, 0, 0>; +def STFSX : PPC32InstPattern2 <"stfsx", Fpr, Gpr0, Gpr, 31, 302, 0, 0>; +def STFSUX : PPC32InstPattern2 <"stfsux", Fpr, Gpr, Gpr, 31, 366, 0, 0>; +def STFD : PPC32InstPattern25 <"stfd", Fpr, Disimm16, Gpr0, 54, 0, 0>; +def STFDU : PPC32InstPattern25 <"stfdu", Fpr, Disimm16, Gpr, 55, 0, 0>; +def STFDX : PPC32InstPattern2 <"stfdx", Fpr, Gpr0, Gpr, 31, 430, 0, 0>; +def STFDUX : PPC32InstPattern2 <"stfdux", 
Fpr, Gpr, Gpr, 31, 494, 0, 0>; +def SUBFIC : PPC32InstPattern0 <"subfic", Gpr, Gpr, Simm16, 8, 0, 0>; +def SUB : PPC32InstPattern35 <"sub", Gpr, Gpr, Gpr, 31, 80, 0, 0>; +def SUBo : PPC32InstPattern35 <"sub.", Gpr, Gpr, Gpr, 31, 81, 0, 0>; +def SUBO : PPC32InstPattern35 <"subo", Gpr, Gpr, Gpr, 31, 80, 0, 0>; +def SUBOo : PPC32InstPattern35 <"subo.", Gpr, Gpr, Gpr, 31, 81, 0, 0>; +def SUBF : PPC32InstPattern2 <"subf", Gpr, Gpr, Gpr, 31, 80, 0, 0>; +def SUBFo : PPC32InstPattern2 <"subf.", Gpr, Gpr, Gpr, 31, 81, 0, 0>; +def SUBFO : PPC32InstPattern2 <"subfo", Gpr, Gpr, Gpr, 31, 80, 0, 0>; +def SUBFOo : PPC32InstPattern2 <"subfo.", Gpr, Gpr, Gpr, 31, 81, 0, 0>; +def SUBC : PPC32InstPattern35 <"subc", Gpr, Gpr, Gpr, 31, 16, 0, 0>; +def SUBCo : PPC32InstPattern35 <"subc.", Gpr, Gpr, Gpr, 31, 17, 0, 0>; +def SUBCO : PPC32InstPattern35 <"subco", Gpr, Gpr, Gpr, 31, 16, 0, 0>; +def SUBCOo : PPC32InstPattern35 <"subco.", Gpr, Gpr, Gpr, 31, 17, 0, 0>; +def SUBFC : PPC32InstPattern2 <"subfc", Gpr, Gpr, Gpr, 31, 16, 0, 0>; +def SUBFCo : PPC32InstPattern2 <"subfc.", Gpr, Gpr, Gpr, 31, 17, 0, 0>; +def SUBFCO : PPC32InstPattern2 <"subfco", Gpr, Gpr, Gpr, 31, 16, 0, 0>; +def SUBFCOo : PPC32InstPattern2 <"subfco.", Gpr, Gpr, Gpr, 31, 17, 0, 0>; +def SUBFE : PPC32InstPattern2 <"subfe", Gpr, Gpr, Gpr, 31, 272, 0, 0>; +def SUBFEo : PPC32InstPattern2 <"subfe.", Gpr, Gpr, Gpr, 31, 273, 0, 0>; +def SUBFEO : PPC32InstPattern2 <"subfeo", Gpr, Gpr, Gpr, 31, 272, 0, 0>; +def SUBFEOo : PPC32InstPattern2 <"subfeo.", Gpr, Gpr, Gpr, 31, 273, 0, 0>; +def SUBFME : PPC32InstPattern3 <"subfme", Gpr, Gpr, 31, 464, 0, 0>; +def SUBFMEo : PPC32InstPattern3 <"subfme.", Gpr, Gpr, 31, 465, 0, 0>; +def SUBFMEO : PPC32InstPattern3 <"subfmeo", Gpr, Gpr, 31, 1488, 0, 0>; +def SUBFMEOo : PPC32InstPattern3 <"subfmeo.", Gpr, Gpr, 31, 1489, 0, 0>; +def SUBFZE : PPC32InstPattern3 <"subfze", Gpr, Gpr, 31, 400, 0, 0>; +def SUBFZEo : PPC32InstPattern3 <"subfze.", Gpr, Gpr, 31, 401, 0, 0>; +def SUBFZEO : PPC32InstPattern3 <"subfzeo", Gpr, Gpr, 31, 1424, 0, 0>; +def SUBFZEOo : PPC32InstPattern3 <"subfzeo.", Gpr, Gpr, 31, 1425, 0, 0>; +def SYNC : PPC32InstPattern36 <"sync", Imm2, 248, 1196, 0, 0>; +def LWSYNC : PPC32InstPattern33 <"lwsync", 2082473132, 0, 0>; +def PTESYNC : PPC32InstPattern33 <"ptesync", 2084570284, 0, 0>; +def TDI : PPC32InstPattern0 <"tdi", Imm5, Gpr, Simm16, 2, 1, 0>; +def TDLTI : PPC32InstPattern37 <"tdlti", Gpr, Simm16, 80, 1, 0>; +def TDLEI : PPC32InstPattern37 <"tdlei", Gpr, Simm16, 84, 1, 0>; +def TDEQI : PPC32InstPattern37 <"tdeqi", Gpr, Simm16, 68, 1, 0>; +def TDGEI : PPC32InstPattern37 <"tdgei", Gpr, Simm16, 76, 1, 0>; +def TDGTI : PPC32InstPattern37 <"tdgti", Gpr, Simm16, 72, 1, 0>; +def TDNLI : PPC32InstPattern37 <"tdnli", Gpr, Simm16, 76, 1, 0>; +def TDNEI : PPC32InstPattern37 <"tdnei", Gpr, Simm16, 88, 1, 0>; +def TDNGI : PPC32InstPattern37 <"tdngi", Gpr, Simm16, 84, 1, 0>; +def TDLLTI : PPC32InstPattern37 <"tdllti", Gpr, Simm16, 66, 1, 0>; +def TDLLEI : PPC32InstPattern37 <"tdllei", Gpr, Simm16, 70, 1, 0>; +def TDLGEI : PPC32InstPattern37 <"tdlgei", Gpr, Simm16, 69, 1, 0>; +def TDLGTI : PPC32InstPattern37 <"tdlgti", Gpr, Simm16, 65, 1, 0>; +def TDLNLI : PPC32InstPattern37 <"tdlnli", Gpr, Simm16, 69, 1, 0>; +def TDLNGI : PPC32InstPattern37 <"tdlngi", Gpr, Simm16, 70, 1, 0>; +def TD : PPC32InstPattern2 <"td", Imm5, Gpr, Gpr, 31, 136, 1, 0>; +def TDLT : PPC32InstPattern38 <"tdlt", Gpr, Gpr, 1008, 136, 1, 0>; +def TDLE : PPC32InstPattern38 <"tdle", Gpr, Gpr, 1012, 136, 1, 0>; +def TDEQ : PPC32InstPattern38 <"tdeq", 
Gpr, Gpr, 996, 136, 1, 0>; +def TDGE : PPC32InstPattern38 <"tdge", Gpr, Gpr, 1004, 136, 1, 0>; +def TDGT : PPC32InstPattern38 <"tdgt", Gpr, Gpr, 1000, 136, 1, 0>; +def TDNL : PPC32InstPattern38 <"tdnl", Gpr, Gpr, 1004, 136, 1, 0>; +def TDNE : PPC32InstPattern38 <"tdne", Gpr, Gpr, 1016, 136, 1, 0>; +def TDNG : PPC32InstPattern38 <"tdng", Gpr, Gpr, 1012, 136, 1, 0>; +def TDLLT : PPC32InstPattern38 <"tdllt", Gpr, Gpr, 994, 136, 1, 0>; +def TDLLE : PPC32InstPattern38 <"tdlle", Gpr, Gpr, 998, 136, 1, 0>; +def TDLGE : PPC32InstPattern38 <"tdlge", Gpr, Gpr, 997, 136, 1, 0>; +def TDLGT : PPC32InstPattern38 <"tdlgt", Gpr, Gpr, 993, 136, 1, 0>; +def TDLNL : PPC32InstPattern38 <"tdlnl", Gpr, Gpr, 997, 136, 1, 0>; +def TDLNG : PPC32InstPattern38 <"tdlng", Gpr, Gpr, 998, 136, 1, 0>; +def TWI : PPC32InstPattern0 <"twi", Imm5, Gpr, Simm16, 3, 0, 0>; +def TWLTI : PPC32InstPattern37 <"twlti", Gpr, Simm16, 112, 0, 0>; +def TWLEI : PPC32InstPattern37 <"twlei", Gpr, Simm16, 116, 0, 0>; +def TWEQI : PPC32InstPattern37 <"tweqi", Gpr, Simm16, 100, 0, 0>; +def TWGEI : PPC32InstPattern37 <"twgei", Gpr, Simm16, 108, 0, 0>; +def TWGTI : PPC32InstPattern37 <"twgti", Gpr, Simm16, 104, 0, 0>; +def TWNLI : PPC32InstPattern37 <"twnli", Gpr, Simm16, 108, 0, 0>; +def TWNEI : PPC32InstPattern37 <"twnei", Gpr, Simm16, 120, 0, 0>; +def TWNGI : PPC32InstPattern37 <"twngi", Gpr, Simm16, 116, 0, 0>; +def TWLLTI : PPC32InstPattern37 <"twllti", Gpr, Simm16, 98, 0, 0>; +def TWLLEI : PPC32InstPattern37 <"twllei", Gpr, Simm16, 102, 0, 0>; +def TWLGEI : PPC32InstPattern37 <"twlgei", Gpr, Simm16, 101, 0, 0>; +def TWLGTI : PPC32InstPattern37 <"twlgti", Gpr, Simm16, 97, 0, 0>; +def TWLNLI : PPC32InstPattern37 <"twlnli", Gpr, Simm16, 101, 0, 0>; +def TWLNGI : PPC32InstPattern37 <"twlngi", Gpr, Simm16, 102, 0, 0>; +def TW : PPC32InstPattern2 <"tw", Imm5, Gpr, Gpr, 31, 8, 0, 0>; +def TWLT : PPC32InstPattern38 <"twlt", Gpr, Gpr, 1008, 8, 0, 0>; +def TWLE : PPC32InstPattern38 <"twle", Gpr, Gpr, 1012, 8, 0, 0>; +def TWEQ : PPC32InstPattern38 <"tweq", Gpr, Gpr, 996, 8, 0, 0>; +def TWGE : PPC32InstPattern38 <"twge", Gpr, Gpr, 1004, 8, 0, 0>; +def TWGT : PPC32InstPattern38 <"twgt", Gpr, Gpr, 1000, 8, 0, 0>; +def TWNL : PPC32InstPattern38 <"twnl", Gpr, Gpr, 1004, 8, 0, 0>; +def TWNE : PPC32InstPattern38 <"twne", Gpr, Gpr, 1016, 8, 0, 0>; +def TWNG : PPC32InstPattern38 <"twng", Gpr, Gpr, 1012, 8, 0, 0>; +def TWLLT : PPC32InstPattern38 <"twllt", Gpr, Gpr, 994, 8, 0, 0>; +def TWLLE : PPC32InstPattern38 <"twlle", Gpr, Gpr, 998, 8, 0, 0>; +def TWLGE : PPC32InstPattern38 <"twlge", Gpr, Gpr, 997, 8, 0, 0>; +def TWLGT : PPC32InstPattern38 <"twlgt", Gpr, Gpr, 993, 8, 0, 0>; +def TWLNL : PPC32InstPattern38 <"twlnl", Gpr, Gpr, 997, 8, 0, 0>; +def TWLNG : PPC32InstPattern38 <"twlng", Gpr, Gpr, 998, 8, 0, 0>; +def TRAP : PPC32InstPattern33 <"trap", 2145386504, 0, 0>; +def XORI : PPC32InstPattern4 <"xori", Gpr, Gpr, Zimm16, 26, 0, 0>; +def XORIS : PPC32InstPattern4 <"xoris", Gpr, Gpr, Zimm16, 27, 0, 0>; +def XOR : PPC32InstPattern5 <"xor", Gpr, Gpr, Gpr, 31, 632, 0, 0>; +def XORo : PPC32InstPattern5 <"xor.", Gpr, Gpr, Gpr, 31, 633, 0, 0>; +def ICBI : PPC32InstPattern38 <"icbi", Gpr0, Gpr, 992, 940, 0, 0>; +def ISYNC : PPC32InstPattern33 <"isync", 1275068716, 0, 0>; +def DCBT : PPC32InstPattern39 <"dcbt", Gpr0, Gpr, Imm4, 62, 556, 1, 0>; +def DCBTST : PPC32InstPattern38 <"dcbtst", Gpr0, Gpr, 992, 492, 0, 0>; +def DCBT128 : PPC32InstPattern39 <"dcbt128", Gpr0, Gpr, Imm4, 62, 556, 1, 0>; +def DCBZ : PPC32InstPattern38 <"dcbz", Gpr0, Gpr, 992, 1004, 0, 0>; +def 
DCBZL : PPC32InstPattern38 <"dcbzl", Gpr0, Gpr, 993, 1004, 1, 0>; +def DCBZ128 : PPC32InstPattern38 <"dcbz128", Gpr0, Gpr, 993, 1004, 1, 0>; +def DCBST : PPC32InstPattern38 <"dcbst", Gpr0, Gpr, 992, 108, 0, 0>; +def DCBF : PPC32InstPattern38 <"dcbf", Gpr0, Gpr, 992, 172, 0, 0>; +def ECIWX : PPC32InstPattern2 <"eciwx", Gpr, Gpr0, Gpr, 31, 620, 0, 0>; +def ECOWX : PPC32InstPattern2 <"ecowx", Gpr, Gpr0, Gpr, 31, 876, 0, 0>; +def EIEIO : PPC32InstPattern33 <"eieio", 2080376492, 0, 0>; +def RFI : PPC32InstPattern33 <"rfi", 1275068516, 0, 0>; +def MTMSR : PPC32InstPattern21 <"mtmsr", Gpr, 31, 292, 0, 0>; +def MTMSRD : PPC32InstPattern40 <"mtmsrd", Gpr, Imm1, 31, 0, 356, 1, 0>; +def MFMSR : PPC32InstPattern21 <"mfmsr", Gpr, 31, 166, 0, 0>; +def DCBA : PPC32InstPattern38 <"dcba", Gpr0, Gpr, 992, 492, 0, 0>; +def DCBI : PPC32InstPattern38 <"dcbi", Gpr0, Gpr, 992, 940, 0, 0>; +def MTSR : PPC32InstPattern41 <"mtsr", Sgr, Gpr, 31, 0, 420, 0, 0>; +def MFSR : PPC32InstPattern42 <"mfsr", Gpr, Sgr, 31, 0, 1190, 0, 0>; +def MTSRIN : PPC32InstPattern20 <"mtsrin", Gpr, Gpr, 31, 0, 484, 0, 0>; +def MFSRIN : PPC32InstPattern20 <"mfsrin", Gpr, Gpr, 31, 0, 294, 0, 0>; +def SLBIE : PPC32InstPattern43 <"slbie", Gpr, 31744, 868, 1, 0>; +def SLBIA : PPC32InstPattern33 <"slbia", 2080375780, 1, 0>; +def SLBMTE : PPC32InstPattern20 <"slbmte", Gpr, Gpr, 31, 0, 804, 1, 0>; +def SLBMFEV : PPC32InstPattern20 <"slbmfev", Gpr, Gpr, 31, 0, 678, 1, 0>; +def SLBMFEE : PPC32InstPattern20 <"slbmfee", Gpr, Gpr, 31, 0, 806, 1, 0>; +def TLBIE : PPC32InstPattern44 <"tlbie", Gpr, Imm1, 496, 0, 612, 1, 0>; +def TLBIEL : PPC32InstPattern43 <"tlbiel", Gpr, 31744, 548, 1, 0>; +def TLBIA : PPC32InstPattern33 <"tlbia", 2080375524, 0, 0>; +def TLBSYNC : PPC32InstPattern33 <"tlbsync", 2080375916, 0, 0>; +def MTTBL : PPC32InstPattern21 <"mttbl", Gpr, 31, 803750, 0, 0>; +def MTTBU : PPC32InstPattern21 <"mttbu", Gpr, 31, 869286, 0, 0>; +def MFTB : PPC32InstPattern28 <"mftb", Gpr, Spr, 31, 742, 0, 0>; +def MFTBU : PPC32InstPattern21 <"mftbu", Gpr, 31, 869094, 0, 0>; +def ATTN : PPC32InstPattern45 <"attn", Imm15, 0, 512, 0, 0>; +def MULLI : PPC32InstPattern0 <"mulli", Gpr, Gpr, Simm16, 7, 0, 0>; +def TLBLD : PPC32InstPattern43 <"tlbld", Gpr, 31744, 932, 0, 0>; +def TLBLI : PPC32InstPattern43 <"tlbli", Gpr, 31744, 996, 0, 0>; +def LVEBX : PPC32InstPattern2 <"lvebx", Vpr, Gpr0, Gpr, 31, 14, 0, 1>; +def LVEHX : PPC32InstPattern2 <"lvehx", Vpr, Gpr0, Gpr, 31, 78, 0, 1>; +def LVEWX : PPC32InstPattern2 <"lvewx", Vpr, Gpr0, Gpr, 31, 142, 0, 1>; +def LVX : PPC32InstPattern2 <"lvx", Vpr, Gpr0, Gpr, 31, 206, 0, 1>; +def LVXL : PPC32InstPattern2 <"lvxl", Vpr, Gpr0, Gpr, 31, 718, 0, 1>; +def STVEBX : PPC32InstPattern2 <"stvebx", Vpr, Gpr0, Gpr, 31, 270, 0, 1>; +def STVEHX : PPC32InstPattern2 <"stvehx", Vpr, Gpr0, Gpr, 31, 334, 0, 1>; +def STVEWX : PPC32InstPattern2 <"stvewx", Vpr, Gpr0, Gpr, 31, 398, 0, 1>; +def STVX : PPC32InstPattern2 <"stvx", Vpr, Gpr0, Gpr, 31, 462, 0, 1>; +def STVXL : PPC32InstPattern2 <"stvxl", Vpr, Gpr0, Gpr, 31, 974, 0, 1>; +def LVSL : PPC32InstPattern2 <"lvsl", Vpr, Gpr0, Gpr, 31, 12, 0, 1>; +def LVSR : PPC32InstPattern2 <"lvsr", Vpr, Gpr0, Gpr, 31, 76, 0, 1>; +def MTVSCR : PPC32InstPattern43 <"mtvscr", Vpr, 4096, 580, 0, 1>; +def MFVSCR : PPC32InstPattern21 <"mfvscr", Vpr, 4, 1540, 0, 1>; +def DST : PPC32InstPattern46 <"dst", Gpr, Gpr, Imm2, 248, 684, 0, 1>; +def DSTT : PPC32InstPattern46 <"dstt", Gpr, Gpr, Imm2, 252, 684, 0, 1>; +def DSTST : PPC32InstPattern46 <"dstst", Gpr, Gpr, Imm2, 248, 748, 0, 1>; +def DSTSTT : 
PPC32InstPattern46 <"dststt", Gpr, Gpr, Imm2, 252, 748, 0, 1>; +def DSS : PPC32InstPattern36 <"dss", Imm2, 248, 1644, 0, 1>; +def DSSALL : PPC32InstPattern33 <"dssall", 2113930860, 0, 1>; +def VADDUBM : PPC32InstPattern2 <"vaddubm", Vpr, Vpr, Vpr, 4, 0, 0, 1>; +def VADDUBS : PPC32InstPattern2 <"vaddubs", Vpr, Vpr, Vpr, 4, 512, 0, 1>; +def VADDSBS : PPC32InstPattern2 <"vaddsbs", Vpr, Vpr, Vpr, 4, 768, 0, 1>; +def VADDUHM : PPC32InstPattern2 <"vadduhm", Vpr, Vpr, Vpr, 4, 64, 0, 1>; +def VADDUHS : PPC32InstPattern2 <"vadduhs", Vpr, Vpr, Vpr, 4, 576, 0, 1>; +def VADDSHS : PPC32InstPattern2 <"vaddshs", Vpr, Vpr, Vpr, 4, 832, 0, 1>; +def VADDUWM : PPC32InstPattern2 <"vadduwm", Vpr, Vpr, Vpr, 4, 128, 0, 1>; +def VADDUWS : PPC32InstPattern2 <"vadduws", Vpr, Vpr, Vpr, 4, 640, 0, 1>; +def VADDSWS : PPC32InstPattern2 <"vaddsws", Vpr, Vpr, Vpr, 4, 896, 0, 1>; +def VADDFP : PPC32InstPattern2 <"vaddfp", Vpr, Vpr, Vpr, 4, 10, 0, 1>; +def VADDCUW : PPC32InstPattern2 <"vaddcuw", Vpr, Vpr, Vpr, 4, 384, 0, 1>; +def VSUBUBM : PPC32InstPattern2 <"vsububm", Vpr, Vpr, Vpr, 4, 0, 0, 1>; +def VSUBUBS : PPC32InstPattern2 <"vsububs", Vpr, Vpr, Vpr, 4, 512, 0, 1>; +def VSUBSBS : PPC32InstPattern2 <"vsubsbs", Vpr, Vpr, Vpr, 4, 768, 0, 1>; +def VSUBUHM : PPC32InstPattern2 <"vsubuhm", Vpr, Vpr, Vpr, 4, 64, 0, 1>; +def VSUBUHS : PPC32InstPattern2 <"vsubuhs", Vpr, Vpr, Vpr, 4, 576, 0, 1>; +def VSUBSHS : PPC32InstPattern2 <"vsubshs", Vpr, Vpr, Vpr, 4, 832, 0, 1>; +def VSUBUWM : PPC32InstPattern2 <"vsubuwm", Vpr, Vpr, Vpr, 4, 128, 0, 1>; +def VSUBUWS : PPC32InstPattern2 <"vsubuws", Vpr, Vpr, Vpr, 4, 640, 0, 1>; +def VSUBSWS : PPC32InstPattern2 <"vsubsws", Vpr, Vpr, Vpr, 4, 896, 0, 1>; +def VSUBFP : PPC32InstPattern2 <"vsubfp", Vpr, Vpr, Vpr, 4, 74, 0, 1>; +def VSUBCUW : PPC32InstPattern2 <"vsubcuw", Vpr, Vpr, Vpr, 4, 384, 0, 1>; +def VMULOUB : PPC32InstPattern2 <"vmuloub", Vpr, Vpr, Vpr, 4, 8, 0, 1>; +def VMULOSB : PPC32InstPattern2 <"vmulosb", Vpr, Vpr, Vpr, 4, 264, 0, 1>; +def VMULOUH : PPC32InstPattern2 <"vmulouh", Vpr, Vpr, Vpr, 4, 72, 0, 1>; +def VMULOSH : PPC32InstPattern2 <"vmulosh", Vpr, Vpr, Vpr, 4, 328, 0, 1>; +def VMULEUB : PPC32InstPattern2 <"vmuleub", Vpr, Vpr, Vpr, 4, 520, 0, 1>; +def VMULESB : PPC32InstPattern2 <"vmulesb", Vpr, Vpr, Vpr, 4, 776, 0, 1>; +def VMULEUH : PPC32InstPattern2 <"vmuleuh", Vpr, Vpr, Vpr, 4, 584, 0, 1>; +def VMULESH : PPC32InstPattern2 <"vmulesh", Vpr, Vpr, Vpr, 4, 840, 0, 1>; +def VMHADDSHS : PPC32InstPattern47 <"vmhaddshs", Vpr, Vpr, Vpr, Vpr, 4, 0, 0, 1>; +def VMHRADDSHS : PPC32InstPattern47 <"vmhraddshs", Vpr, Vpr, Vpr, Vpr, 4, 1, 0, 1>; +def VMLADDUHM : PPC32InstPattern47 <"vmladduhm", Vpr, Vpr, Vpr, Vpr, 4, 2, 0, 1>; +def VMADDFP : PPC32InstPattern19 <"vmaddfp", Vpr, Vpr, Vpr, Vpr, 4, 14, 0, 1>; +def VMSUMUBM : PPC32InstPattern47 <"vmsumubm", Vpr, Vpr, Vpr, Vpr, 4, 4, 0, 1>; +def VMSUMMBM : PPC32InstPattern47 <"vmsummbm", Vpr, Vpr, Vpr, Vpr, 4, 5, 0, 1>; +def VMSUMUHM : PPC32InstPattern47 <"vmsumuhm", Vpr, Vpr, Vpr, Vpr, 4, 6, 0, 1>; +def VMSUMUHS : PPC32InstPattern47 <"vmsumuhs", Vpr, Vpr, Vpr, Vpr, 4, 7, 0, 1>; +def VMSUMSHM : PPC32InstPattern47 <"vmsumshm", Vpr, Vpr, Vpr, Vpr, 4, 8, 0, 1>; +def VMSUMSHS : PPC32InstPattern47 <"vmsumshs", Vpr, Vpr, Vpr, Vpr, 4, 9, 0, 1>; +def VSUMSWS : PPC32InstPattern2 <"vsumsws", Vpr, Vpr, Vpr, 4, 904, 0, 1>; +def VSUM2SWS : PPC32InstPattern2 <"vsum2sws", Vpr, Vpr, Vpr, 4, 648, 0, 1>; +def VSUM4UBS : PPC32InstPattern2 <"vsum4ubs", Vpr, Vpr, Vpr, 4, 520, 0, 1>; +def VSUM4SBS : PPC32InstPattern2 <"vsum4sbs", Vpr, Vpr, Vpr, 4, 776, 0, 1>; +def 
VSUM4SHS : PPC32InstPattern2 <"vsum4shs", Vpr, Vpr, Vpr, 4, 584, 0, 1>; +def VAVGUB : PPC32InstPattern2 <"vavgub", Vpr, Vpr, Vpr, 4, 2, 0, 1>; +def VAVGUH : PPC32InstPattern2 <"vavguh", Vpr, Vpr, Vpr, 4, 66, 0, 1>; +def VAVGUW : PPC32InstPattern2 <"vavguw", Vpr, Vpr, Vpr, 4, 130, 0, 1>; +def VAVGSB : PPC32InstPattern2 <"vavgsb", Vpr, Vpr, Vpr, 4, 258, 0, 1>; +def VAVGSH : PPC32InstPattern2 <"vavgsh", Vpr, Vpr, Vpr, 4, 322, 0, 1>; +def VAVGSW : PPC32InstPattern2 <"vavgsw", Vpr, Vpr, Vpr, 4, 386, 0, 1>; +def VAND : PPC32InstPattern2 <"vand", Vpr, Vpr, Vpr, 4, 4, 0, 1>; +def VOR : PPC32InstPattern2 <"vor", Vpr, Vpr, Vpr, 4, 132, 0, 1>; +def VXOR : PPC32InstPattern2 <"vxor", Vpr, Vpr, Vpr, 4, 196, 0, 1>; +def VANDC : PPC32InstPattern2 <"vandc", Vpr, Vpr, Vpr, 4, 68, 0, 1>; +def VNOR : PPC32InstPattern2 <"vnor", Vpr, Vpr, Vpr, 4, 260, 0, 1>; +def VRLB : PPC32InstPattern2 <"vrlb", Vpr, Vpr, Vpr, 4, 4, 0, 1>; +def VRLH : PPC32InstPattern2 <"vrlh", Vpr, Vpr, Vpr, 4, 68, 0, 1>; +def VRLW : PPC32InstPattern2 <"vrlw", Vpr, Vpr, Vpr, 4, 132, 0, 1>; +def VSLB : PPC32InstPattern2 <"vslb", Vpr, Vpr, Vpr, 4, 260, 0, 1>; +def VSLH : PPC32InstPattern2 <"vslh", Vpr, Vpr, Vpr, 4, 324, 0, 1>; +def VSLW : PPC32InstPattern2 <"vslw", Vpr, Vpr, Vpr, 4, 388, 0, 1>; +def VSL : PPC32InstPattern2 <"vsl", Vpr, Vpr, Vpr, 4, 452, 0, 1>; +def VSRB : PPC32InstPattern2 <"vsrb", Vpr, Vpr, Vpr, 4, 516, 0, 1>; +def VSRAB : PPC32InstPattern2 <"vsrab", Vpr, Vpr, Vpr, 4, 772, 0, 1>; +def VSRH : PPC32InstPattern2 <"vsrh", Vpr, Vpr, Vpr, 4, 580, 0, 1>; +def VSRAH : PPC32InstPattern2 <"vsrah", Vpr, Vpr, Vpr, 4, 836, 0, 1>; +def VSRW : PPC32InstPattern2 <"vsrw", Vpr, Vpr, Vpr, 4, 644, 0, 1>; +def VSRAW : PPC32InstPattern2 <"vsraw", Vpr, Vpr, Vpr, 4, 900, 0, 1>; +def VSR : PPC32InstPattern2 <"vsr", Vpr, Vpr, Vpr, 4, 708, 0, 1>; +def VCMPGTUB : PPC32InstPattern2 <"vcmpgtub", Vpr, Vpr, Vpr, 4, 518, 0, 1>; +def VCMPGTUBo : PPC32InstPattern2 <"vcmpgtub.", Vpr, Vpr, Vpr, 4, 518, 0, 1>; +def VCMPGTSB : PPC32InstPattern2 <"vcmpgtsb", Vpr, Vpr, Vpr, 4, 774, 0, 1>; +def VCMPGTSBo : PPC32InstPattern2 <"vcmpgtsb.", Vpr, Vpr, Vpr, 4, 774, 0, 1>; +def VCMPGTUH : PPC32InstPattern2 <"vcmpgtuh", Vpr, Vpr, Vpr, 4, 582, 0, 1>; +def VCMPGTUHo : PPC32InstPattern2 <"vcmpgtuh.", Vpr, Vpr, Vpr, 4, 582, 0, 1>; +def VCMPGTSH : PPC32InstPattern2 <"vcmpgtsh", Vpr, Vpr, Vpr, 4, 838, 0, 1>; +def VCMPGTSHo : PPC32InstPattern2 <"vcmpgtsh.", Vpr, Vpr, Vpr, 4, 838, 0, 1>; +def VCMPGTUW : PPC32InstPattern2 <"vcmpgtuw", Vpr, Vpr, Vpr, 4, 646, 0, 1>; +def VCMPGTUWo : PPC32InstPattern2 <"vcmpgtuw.", Vpr, Vpr, Vpr, 4, 646, 0, 1>; +def VCMPGTSW : PPC32InstPattern2 <"vcmpgtsw", Vpr, Vpr, Vpr, 4, 902, 0, 1>; +def VCMPGTSWo : PPC32InstPattern2 <"vcmpgtsw.", Vpr, Vpr, Vpr, 4, 902, 0, 1>; +def VCMPGTFP : PPC32InstPattern2 <"vcmpgtfp", Vpr, Vpr, Vpr, 4, 710, 0, 1>; +def VCMPGTFPo : PPC32InstPattern2 <"vcmpgtfp.", Vpr, Vpr, Vpr, 4, 710, 0, 1>; +def VCMPEQUB : PPC32InstPattern2 <"vcmpequb", Vpr, Vpr, Vpr, 4, 6, 0, 1>; +def VCMPEQUBo : PPC32InstPattern2 <"vcmpequb.", Vpr, Vpr, Vpr, 4, 6, 0, 1>; +def VCMPEQUH : PPC32InstPattern2 <"vcmpequh", Vpr, Vpr, Vpr, 4, 70, 0, 1>; +def VCMPEQUHo : PPC32InstPattern2 <"vcmpequh.", Vpr, Vpr, Vpr, 4, 70, 0, 1>; +def VCMPEQUW : PPC32InstPattern2 <"vcmpequw", Vpr, Vpr, Vpr, 4, 134, 0, 1>; +def VCMPEQUWo : PPC32InstPattern2 <"vcmpequw.", Vpr, Vpr, Vpr, 4, 134, 0, 1>; +def VCMPEQFP : PPC32InstPattern2 <"vcmpeqfp", Vpr, Vpr, Vpr, 4, 198, 0, 1>; +def VCMPEQFPo : PPC32InstPattern2 <"vcmpeqfp.", Vpr, Vpr, Vpr, 4, 198, 0, 1>; +def VCMPGEFP : 
PPC32InstPattern2 <"vcmpgefp", Vpr, Vpr, Vpr, 4, 454, 0, 1>; +def VCMPGEFPo : PPC32InstPattern2 <"vcmpgefp.", Vpr, Vpr, Vpr, 4, 454, 0, 1>; +def VCMPBFP : PPC32InstPattern2 <"vcmpbfp", Vpr, Vpr, Vpr, 4, 966, 0, 1>; +def VCMPBFPo : PPC32InstPattern2 <"vcmpbfp.", Vpr, Vpr, Vpr, 4, 966, 0, 1>; +def VSEL : PPC32InstPattern47 <"vsel", Vpr, Vpr, Vpr, Vpr, 4, 10, 0, 1>; +def VPKUHUM : PPC32InstPattern2 <"vpkuhum", Vpr, Vpr, Vpr, 4, 14, 0, 1>; +def VPKUHUS : PPC32InstPattern2 <"vpkuhus", Vpr, Vpr, Vpr, 4, 142, 0, 1>; +def VPKSHUS : PPC32InstPattern2 <"vpkshus", Vpr, Vpr, Vpr, 4, 270, 0, 1>; +def VPKSHSS : PPC32InstPattern2 <"vpkshss", Vpr, Vpr, Vpr, 4, 398, 0, 1>; +def VPKUWUM : PPC32InstPattern2 <"vpkuwum", Vpr, Vpr, Vpr, 4, 78, 0, 1>; +def VPKUWUS : PPC32InstPattern2 <"vpkuwus", Vpr, Vpr, Vpr, 4, 206, 0, 1>; +def VPKSWUS : PPC32InstPattern2 <"vpkswus", Vpr, Vpr, Vpr, 4, 334, 0, 1>; +def VPKSWSS : PPC32InstPattern2 <"vpkswss", Vpr, Vpr, Vpr, 4, 462, 0, 1>; +def VPKPX : PPC32InstPattern2 <"vpkpx", Vpr, Vpr, Vpr, 4, 782, 0, 1>; +def VUPKHSB : PPC32InstPattern20 <"vupkhsb", Vpr, Vpr, 4, 0, 526, 0, 1>; +def VUPKHSH : PPC32InstPattern20 <"vupkhsh", Vpr, Vpr, 4, 0, 590, 0, 1>; +def VUPKHPX : PPC32InstPattern20 <"vupkhpx", Vpr, Vpr, 4, 0, 846, 0, 1>; +def VUPKLSB : PPC32InstPattern20 <"vupklsb", Vpr, Vpr, 4, 0, 654, 0, 1>; +def VUPKLSH : PPC32InstPattern20 <"vupklsh", Vpr, Vpr, 4, 0, 718, 0, 1>; +def VUPKLPX : PPC32InstPattern20 <"vupklpx", Vpr, Vpr, 4, 0, 974, 0, 1>; +def VMRGHB : PPC32InstPattern2 <"vmrghb", Vpr, Vpr, Vpr, 4, 12, 0, 1>; +def VMRGHH : PPC32InstPattern2 <"vmrghh", Vpr, Vpr, Vpr, 4, 76, 0, 1>; +def VMRGHW : PPC32InstPattern2 <"vmrghw", Vpr, Vpr, Vpr, 4, 140, 0, 1>; +def VMRGLB : PPC32InstPattern2 <"vmrglb", Vpr, Vpr, Vpr, 4, 268, 0, 1>; +def VMRGLH : PPC32InstPattern2 <"vmrglh", Vpr, Vpr, Vpr, 4, 332, 0, 1>; +def VMRGLW : PPC32InstPattern2 <"vmrglw", Vpr, Vpr, Vpr, 4, 396, 0, 1>; +def VSPLTB : PPC32InstPattern35 <"vspltb", Vpr, Vpr, Imm5, 4, 524, 0, 1>; +def VSPLTH : PPC32InstPattern35 <"vsplth", Vpr, Vpr, Imm5, 4, 588, 0, 1>; +def VSPLTW : PPC32InstPattern35 <"vspltw", Vpr, Vpr, Imm5, 4, 652, 0, 1>; +def VSPLTISB : PPC32InstPattern3 <"vspltisb", Vpr, Imm5, 4, 780, 0, 1>; +def VSPLTISH : PPC32InstPattern3 <"vspltish", Vpr, Imm5, 4, 844, 0, 1>; +def VSPLTISW : PPC32InstPattern3 <"vspltisw", Vpr, Imm5, 4, 908, 0, 1>; +def VPERM : PPC32InstPattern47 <"vperm", Vpr, Vpr, Vpr, Vpr, 4, 11, 0, 1>; +def VSLDOI : PPC32InstPattern48 <"vsldoi", Vpr, Vpr, Vpr, Imm4, 4, 0, 12, 0, 1>; +def VSLO : PPC32InstPattern2 <"vslo", Vpr, Vpr, Vpr, 4, 12, 0, 1>; +def VSRO : PPC32InstPattern2 <"vsro", Vpr, Vpr, Vpr, 4, 76, 0, 1>; +def VMAXUB : PPC32InstPattern2 <"vmaxub", Vpr, Vpr, Vpr, 4, 2, 0, 1>; +def VMAXSB : PPC32InstPattern2 <"vmaxsb", Vpr, Vpr, Vpr, 4, 258, 0, 1>; +def VMAXUH : PPC32InstPattern2 <"vmaxuh", Vpr, Vpr, Vpr, 4, 66, 0, 1>; +def VMAXSH : PPC32InstPattern2 <"vmaxsh", Vpr, Vpr, Vpr, 4, 322, 0, 1>; +def VMAXUW : PPC32InstPattern2 <"vmaxuw", Vpr, Vpr, Vpr, 4, 130, 0, 1>; +def VMAXSW : PPC32InstPattern2 <"vmaxsw", Vpr, Vpr, Vpr, 4, 386, 0, 1>; +def VMAXFP : PPC32InstPattern2 <"vmaxfp", Vpr, Vpr, Vpr, 4, 10, 0, 1>; +def VMINUB : PPC32InstPattern2 <"vminub", Vpr, Vpr, Vpr, 4, 514, 0, 1>; +def VMINSB : PPC32InstPattern2 <"vminsb", Vpr, Vpr, Vpr, 4, 770, 0, 1>; +def VMINUH : PPC32InstPattern2 <"vminuh", Vpr, Vpr, Vpr, 4, 578, 0, 1>; +def VMINSH : PPC32InstPattern2 <"vminsh", Vpr, Vpr, Vpr, 4, 834, 0, 1>; +def VMINUW : PPC32InstPattern2 <"vminuw", Vpr, Vpr, Vpr, 4, 642, 0, 1>; +def VMINSW : 
PPC32InstPattern2 <"vminsw", Vpr, Vpr, Vpr, 4, 898, 0, 1>; +def VMINFP : PPC32InstPattern2 <"vminfp", Vpr, Vpr, Vpr, 4, 74, 0, 1>; +def VREFP : PPC32InstPattern20 <"vrefp", Vpr, Vpr, 4, 0, 266, 0, 1>; +def VRSQRTEFP : PPC32InstPattern20 <"vrsqrtefp", Vpr, Vpr, 4, 0, 330, 0, 1>; +def VLOGEFP : PPC32InstPattern20 <"vlogefp", Vpr, Vpr, 4, 0, 458, 0, 1>; +def VEXPTEFP : PPC32InstPattern20 <"vexptefp", Vpr, Vpr, 4, 0, 394, 0, 1>; +def VNMSUBFP : PPC32InstPattern19 <"vnmsubfp", Vpr, Vpr, Vpr, Vpr, 4, 15, 0, 1>; +def VRFIN : PPC32InstPattern20 <"vrfin", Vpr, Vpr, 4, 0, 522, 0, 1>; +def VRFIZ : PPC32InstPattern20 <"vrfiz", Vpr, Vpr, 4, 0, 586, 0, 1>; +def VRFIP : PPC32InstPattern20 <"vrfip", Vpr, Vpr, 4, 0, 650, 0, 1>; +def VRFIM : PPC32InstPattern20 <"vrfim", Vpr, Vpr, 4, 0, 714, 0, 1>; +def VCTUXS : PPC32InstPattern35 <"vctuxs", Vpr, Vpr, Imm5, 4, 906, 0, 1>; +def VCTSXS : PPC32InstPattern35 <"vctsxs", Vpr, Vpr, Imm5, 4, 970, 0, 1>; +def VCFUX : PPC32InstPattern35 <"vcfux", Vpr, Vpr, Imm5, 4, 778, 0, 1>; +def VCFSX : PPC32InstPattern35 <"vcfsx", Vpr, Vpr, Imm5, 4, 842, 0, 1>; diff --git a/lib/Target/PowerPC/PPCJITInfo.h b/lib/Target/PowerPC/PPCJITInfo.h new file mode 100644 index 00000000000..bd808515991 --- /dev/null +++ b/lib/Target/PowerPC/PPCJITInfo.h @@ -0,0 +1,49 @@ +//===- PowerPCJITInfo.h - PowerPC impl. of the JIT interface ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the PowerPC implementation of the TargetJITInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef POWERPCJITINFO_H +#define POWERPCJITINFO_H + +#include "llvm/Target/TargetJITInfo.h" + +namespace llvm { + class TargetMachine; + class IntrinsicLowering; + + class PowerPCJITInfo : public TargetJITInfo { + TargetMachine &TM; + public: + PowerPCJITInfo(TargetMachine &tm) : TM(tm) {} + + /// addPassesToJITCompile - Add passes to the specified pass manager to + /// implement a fast dynamic compiler for this target. Return true if this + /// is not supported for this target. + /// + virtual void addPassesToJITCompile(FunctionPassManager &PM); + + /// replaceMachineCodeForFunction - Make it so that calling the function + /// whose machine code is at OLD turns into a call to NEW, perhaps by + /// overwriting OLD with a branch to NEW. This is used for self-modifying + /// code. + /// + virtual void replaceMachineCodeForFunction(void *Old, void *New); + + /// getJITStubForFunction - Create or return a stub for the specified + /// function. This stub acts just like the specified function, except that + /// it allows the "address" of the function to be taken without having to + /// generate code for it. + virtual void *getJITStubForFunction(Function *F, MachineCodeEmitter &MCE); + }; +} + +#endif diff --git a/lib/Target/PowerPC/PPCRegisterInfo.td b/lib/Target/PowerPC/PPCRegisterInfo.td new file mode 100644 index 00000000000..d6aaf4bda1c --- /dev/null +++ b/lib/Target/PowerPC/PPCRegisterInfo.td @@ -0,0 +1,82 @@ +//===- PowerPCReg.td - Describe the PowerPC Register File -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +class PPCReg : Register { + let Namespace = "PPC32"; +} + +// We identify all our registers with a 5-bit ID, for consistency's sake. + +// GPR - One of the 32 32-bit general-purpose registers +class GPR<bits<5> num> : PPCReg { + field bits<5> Num = num; +} + +// SPR - One of the 32-bit special-purpose registers +class SPR<bits<5> num> : PPCReg { + field bits<5> Num = num; +} + +// FPR - One of the 32 64-bit floating-point registers +class FPR<bits<5> num> : PPCReg { + field bits<5> Num = num; +} + +// CR - One of the 8 4-bit condition registers +class CR<bits<5> num> : PPCReg { + field bits<5> Num = num; +} + +// General-purpose registers +def R0 : GPR< 0>; def R1 : GPR< 1>; def R2 : GPR< 2>; def R3 : GPR< 3>; +def R4 : GPR< 4>; def R5 : GPR< 5>; def R6 : GPR< 6>; def R7 : GPR< 7>; +def R8 : GPR< 8>; def R9 : GPR< 9>; def R10 : GPR<10>; def R11 : GPR<11>; +def R12 : GPR<12>; def R13 : GPR<13>; def R14 : GPR<14>; def R15 : GPR<15>; +def R16 : GPR<16>; def R17 : GPR<17>; def R18 : GPR<18>; def R19 : GPR<19>; +def R20 : GPR<20>; def R21 : GPR<21>; def R22 : GPR<22>; def R23 : GPR<23>; +def R24 : GPR<24>; def R25 : GPR<25>; def R26 : GPR<26>; def R27 : GPR<27>; +def R28 : GPR<28>; def R29 : GPR<29>; def R30 : GPR<30>; def R31 : GPR<31>; + +// Floating-point registers +def F0 : FPR< 0>; def F1 : FPR< 1>; def F2 : FPR< 2>; def F3 : FPR< 3>; +def F4 : FPR< 4>; def F5 : FPR< 5>; def F6 : FPR< 6>; def F7 : FPR< 7>; +def F8 : FPR< 8>; def F9 : FPR< 9>; def F10 : FPR<10>; def F11 : FPR<11>; +def F12 : FPR<12>; def F13 : FPR<13>; def F14 : FPR<14>; def F15 : FPR<15>; +def F16 : FPR<16>; def F17 : FPR<17>; def F18 : FPR<18>; def F19 : FPR<19>; +def F20 : FPR<20>; def F21 : FPR<21>; def F22 : FPR<22>; def F23 : FPR<23>; +def F24 : FPR<24>; def F25 : FPR<25>; def F26 : FPR<26>; def F27 : FPR<27>; +def F28 : FPR<28>; def F29 : FPR<29>; def F30 : FPR<30>; def F31 : FPR<31>; + +// Condition registers +def CR0 : CR<0>; def CR1 : CR<1>; def CR2 : CR<2>; def CR3 : CR<3>; +def CR4 : CR<4>; def CR5 : CR<5>; def CR6 : CR<6>; def CR7 : CR<7>; + +// Floating-point status and control register +def FPSCR : SPR<0>; +// fiXed-point Exception Register? :-) +def XER : SPR<1>; +// Link register +def LR : SPR<2>; +// Count register +def CTR : SPR<3>; +// These are the "time base" registers which are read-only in user mode. +def TBL : SPR<4>; +def TBU : SPR<5>; + +/// Register classes: one for floats and another for non-floats. +def GPRC : RegisterClass; +def FPRC : RegisterClass; + diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp new file mode 100644 index 00000000000..e221f538416 --- /dev/null +++ b/lib/Target/PowerPC/PPCTargetMachine.cpp @@ -0,0 +1,88 @@ +//===-- PowerPCTargetMachine.cpp - Define TargetMachine for PowerPC -------===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +#include "PowerPCTargetMachine.h" +#include "PowerPC.h" +#include "llvm/IntrinsicLowering.h" +#include "llvm/Module.h" +#include "llvm/PassManager.h" +#include "llvm/Target/TargetMachineImpls.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/Transforms/Scalar.h" +using namespace llvm; + +// allocatePowerPCTargetMachine - Allocate and return a subclass of +// TargetMachine that implements the PowerPC backend. +// +TargetMachine *llvm::allocatePowerPCTargetMachine(const Module &M, + IntrinsicLowering *IL) { + return new PowerPCTargetMachine(M, IL); +} + +/// PowerPCTargetMachine ctor - Create an ILP32 architecture model +/// +/// FIXME: Should double alignment be 8 bytes? Then we get a PtrAl != DoubleAl abort +PowerPCTargetMachine::PowerPCTargetMachine(const Module &M, + IntrinsicLowering *IL) + : TargetMachine("PowerPC", IL, false, 4, 4, 4, 4, 4, 4, 4, 4), + FrameInfo(TargetFrameInfo::StackGrowsDown, 16, -4), JITInfo(*this) { +} + +/// addPassesToEmitAssembly - Add passes to the specified pass manager +/// to implement a static compiler for this target. +/// +bool PowerPCTargetMachine::addPassesToEmitAssembly(PassManager &PM, + std::ostream &Out) { + // FIXME: Implement efficient support for garbage collection intrinsics. + PM.add(createLowerGCPass()); + + // FIXME: Implement the invoke/unwind instructions! + PM.add(createLowerInvokePass()); + + // FIXME: The code generator does not properly handle functions with + // unreachable basic blocks. + PM.add(createCFGSimplificationPass()); + + // FIXME: Implement the switch instruction in the instruction selector! + PM.add(createLowerSwitchPass()); + + PM.add(createPPCSimpleInstructionSelector(*this)); + PM.add(createRegisterAllocator()); + PM.add(createPrologEpilogCodeInserter()); + PM.add(createPPCCodePrinterPass(Out, *this)); + PM.add(createMachineCodeDeleter()); + return false; +} + +/// addPassesToJITCompile - Add passes to the specified pass manager to +/// implement a fast dynamic compiler for this target. +/// +void PowerPCJITInfo::addPassesToJITCompile(FunctionPassManager &PM) { + // FIXME: Implement efficient support for garbage collection intrinsics. + PM.add(createLowerGCPass()); + + // FIXME: Implement the invoke/unwind instructions! + PM.add(createLowerInvokePass()); + + // FIXME: The code generator does not properly handle functions with + // unreachable basic blocks. + PM.add(createCFGSimplificationPass()); + + // FIXME: Implement the switch instruction in the instruction selector! + PM.add(createLowerSwitchPass()); + + PM.add(createPPCSimpleInstructionSelector(TM)); + PM.add(createRegisterAllocator()); + PM.add(createPrologEpilogCodeInserter()); +} + diff --git a/lib/Target/PowerPC/PowerPC.td b/lib/Target/PowerPC/PowerPC.td new file mode 100644 index 00000000000..f70382f8169 --- /dev/null +++ b/lib/Target/PowerPC/PowerPC.td @@ -0,0 +1,44 @@ +//===- PowerPC.td - Describe the PowerPC Target Machine ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. 
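// Illustrative sketch (not part of this patch): roughly how a driver such as llc
// would use the target machine defined above to emit assembly for a module, using the
// headers already included in this file.  Passing a null IntrinsicLowering to get the
// default lowering, and calling addPassesToEmitAssembly through the base TargetMachine
// interface, are assumptions of this sketch; error handling is omitted.
static void emitAsmForModule(Module &M, std::ostream &Out) {
  TargetMachine *TM = allocatePowerPCTargetMachine(M, /*IntrinsicLowering*/0);
  PassManager PM;
  if (TM->addPassesToEmitAssembly(PM, Out))   // returns true if unsupported
    return;
  PM.run(M);                                  // lower, select, allocate, print
  delete TM;
}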
+// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +// Get the target-independent interfaces which we are implementing... +// +include "../Target.td" + +//===----------------------------------------------------------------------===// +// Register File Description +//===----------------------------------------------------------------------===// + +include "PowerPCReg.td" +include "PowerPCInstrs.td" + +def PowerPCInstrInfo : InstrInfo { + let PHIInst = PHI; + + let TSFlagsFields = ["ArgCount", "Arg0Type", "Arg1Type", "Arg2Type", "Arg3Type", "Arg4Type", "VMX", "PPC64"]; + let TSFlagsShifts = [ 0, 3, 8, 13, 18, 23, 28, 29 ]; +} + +def PowerPC : Target { + // Pointers are 32-bits in size. + let PointerType = i32; + + // According to the Mach-O Runtime ABI, these regs are nonvolatile across + // calls: + let CalleeSavedRegisters = [R1, R13, R14, R15, R16, R17, R18, R19, + R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31, F14, F15, + F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, + F30, F31, CR2, CR3, CR4]; + + // Pull in Instruction Info: + let InstructionSet = PowerPCInstrInfo; +} diff --git a/lib/Target/PowerPC/PowerPCAsmPrinter.cpp b/lib/Target/PowerPC/PowerPCAsmPrinter.cpp new file mode 100644 index 00000000000..697be0907c2 --- /dev/null +++ b/lib/Target/PowerPC/PowerPCAsmPrinter.cpp @@ -0,0 +1,694 @@ +//===-- PPC32/Printer.cpp - Convert X86 LLVM code to Intel assembly ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains a printer that converts from our internal +// representation of machine-dependent LLVM code to Intel-format +// assembly language. This printer is the output mechanism used +// by `llc' and `lli -print-machineinstrs' on X86. +// +//===----------------------------------------------------------------------===// + +#include + +#include "PowerPC.h" +#include "PowerPCInstrInfo.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/Assembly/Writer.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Support/Mangler.h" +#include "Support/Statistic.h" +#include "Support/StringExtras.h" +#include "Support/CommandLine.h" + +namespace llvm { + +namespace { + Statistic<> EmittedInsts("asm-printer", "Number of machine instrs printed"); + + struct Printer : public MachineFunctionPass { + /// Output stream on which we're printing assembly code. + /// + std::ostream &O; + + /// Target machine description which we query for reg. names, data + /// layout, etc. + /// + TargetMachine &TM; + + /// Name-mangler for global names. + /// + Mangler *Mang; + std::set< std::string > Stubs; + std::set Strings; + + Printer(std::ostream &o, TargetMachine &tm) : O(o), TM(tm) { } + + /// We name each basic block in a Function with a unique number, so + /// that we can consistently refer to them later. This is cleared + /// at the beginning of each call to runOnMachineFunction(). 
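// Illustrative sketch (not part of this patch): with the TSFlagsShifts list above
// ([0, 3, 8, 13, 18, 23, 28, 29]), ArgCount is a 3-bit field, each ArgNType is a
// 5-bit field, and VMX / PPC64 are single bits.  The asm printer later unpacks them
// with the PPC32II masks and shifts; the arithmetic is simply the following.  The
// helper names are hypothetical; the field widths are implied by the shift list.
static inline unsigned tsArgCount(unsigned TSFlags)            { return TSFlags & 7; }                  // bits 0-2
static inline unsigned tsArgType(unsigned TSFlags, unsigned N) { return (TSFlags >> (3 + 5*N)) & 31; }  // N = 0..4
static inline bool     tsUsesVMX(unsigned TSFlags)             { return (TSFlags >> 28) & 1; }
static inline bool     tsNeedsPPC64(unsigned TSFlags)          { return (TSFlags >> 29) & 1; }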
+ /// + typedef std::map ValueMapTy; + ValueMapTy NumberForBB; + + /// Cache of mangled name for current function. This is + /// recalculated at the beginning of each call to + /// runOnMachineFunction(). + /// + std::string CurrentFnName; + + virtual const char *getPassName() const { + return "PowerPC Assembly Printer"; + } + + void printMachineInstruction(const MachineInstr *MI); + void printOp(const MachineOperand &MO, + bool elideOffsetKeyword = false); + void printConstantPool(MachineConstantPool *MCP); + bool runOnMachineFunction(MachineFunction &F); + bool doInitialization(Module &M); + bool doFinalization(Module &M); + void emitGlobalConstant(const Constant* CV); + void emitConstantValueOnly(const Constant *CV); + }; +} // end of anonymous namespace + +/// createPPCCodePrinterPass - Returns a pass that prints the X86 +/// assembly code for a MachineFunction to the given output stream, +/// using the given target machine description. This should work +/// regardless of whether the function is in SSA form. +/// +FunctionPass *createPPCCodePrinterPass(std::ostream &o,TargetMachine &tm){ + return new Printer(o, tm); +} + +/// isStringCompatible - Can we treat the specified array as a string? +/// Only if it is an array of ubytes or non-negative sbytes. +/// +static bool isStringCompatible(const ConstantArray *CVA) { + const Type *ETy = cast(CVA->getType())->getElementType(); + if (ETy == Type::UByteTy) return true; + if (ETy != Type::SByteTy) return false; + + for (unsigned i = 0; i < CVA->getNumOperands(); ++i) + if (cast(CVA->getOperand(i))->getValue() < 0) + return false; + + return true; +} + +/// toOctal - Convert the low order bits of X into an octal digit. +/// +static inline char toOctal(int X) { + return (X&7)+'0'; +} + +/// getAsCString - Return the specified array as a C compatible +/// string, only if the predicate isStringCompatible is true. +/// +static void printAsCString(std::ostream &O, const ConstantArray *CVA) { + assert(isStringCompatible(CVA) && "Array is not string compatible!"); + + O << "\""; + for (unsigned i = 0; i < CVA->getNumOperands(); ++i) { + unsigned char C = cast(CVA->getOperand(i))->getRawValue(); + + if (C == '"') { + O << "\\\""; + } else if (C == '\\') { + O << "\\\\"; + } else if (isprint(C)) { + O << C; + } else { + switch(C) { + case '\b': O << "\\b"; break; + case '\f': O << "\\f"; break; + case '\n': O << "\\n"; break; + case '\r': O << "\\r"; break; + case '\t': O << "\\t"; break; + default: + O << '\\'; + O << toOctal(C >> 6); + O << toOctal(C >> 3); + O << toOctal(C >> 0); + break; + } + } + } + O << "\""; +} + +// Print out the specified constant, without a storage class. Only the +// constants valid in constant expressions can occur here. +void Printer::emitConstantValueOnly(const Constant *CV) { + if (CV->isNullValue()) + O << "0"; + else if (const ConstantBool *CB = dyn_cast(CV)) { + assert(CB == ConstantBool::True); + O << "1"; + } else if (const ConstantSInt *CI = dyn_cast(CV)) + O << CI->getValue(); + else if (const ConstantUInt *CI = dyn_cast(CV)) + O << CI->getValue(); + else if (const ConstantPointerRef *CPR = dyn_cast(CV)) + // This is a constant address for a global variable or function. Use the + // name of the variable or function as the address value. 
+ O << Mang->getValueName(CPR->getValue()); + else if (const ConstantExpr *CE = dyn_cast(CV)) { + const TargetData &TD = TM.getTargetData(); + switch(CE->getOpcode()) { + case Instruction::GetElementPtr: { + // generate a symbolic expression for the byte address + const Constant *ptrVal = CE->getOperand(0); + std::vector idxVec(CE->op_begin()+1, CE->op_end()); + if (unsigned Offset = TD.getIndexedOffset(ptrVal->getType(), idxVec)) { + O << "("; + emitConstantValueOnly(ptrVal); + O << ") + " << Offset; + } else { + emitConstantValueOnly(ptrVal); + } + break; + } + case Instruction::Cast: { + // Support only non-converting or widening casts for now, that is, ones + // that do not involve a change in value. This assertion is really gross, + // and may not even be a complete check. + Constant *Op = CE->getOperand(0); + const Type *OpTy = Op->getType(), *Ty = CE->getType(); + + // Remember, kids, pointers on x86 can be losslessly converted back and + // forth into 32-bit or wider integers, regardless of signedness. :-P + assert(((isa(OpTy) + && (Ty == Type::LongTy || Ty == Type::ULongTy + || Ty == Type::IntTy || Ty == Type::UIntTy)) + || (isa(Ty) + && (OpTy == Type::LongTy || OpTy == Type::ULongTy + || OpTy == Type::IntTy || OpTy == Type::UIntTy)) + || (((TD.getTypeSize(Ty) >= TD.getTypeSize(OpTy)) + && OpTy->isLosslesslyConvertibleTo(Ty)))) + && "FIXME: Don't yet support this kind of constant cast expr"); + O << "("; + emitConstantValueOnly(Op); + O << ")"; + break; + } + case Instruction::Add: + O << "("; + emitConstantValueOnly(CE->getOperand(0)); + O << ") + ("; + emitConstantValueOnly(CE->getOperand(1)); + O << ")"; + break; + default: + assert(0 && "Unsupported operator!"); + } + } else { + assert(0 && "Unknown constant value!"); + } +} + +// Print a constant value or values, with the appropriate storage class as a +// prefix. +void Printer::emitGlobalConstant(const Constant *CV) { + const TargetData &TD = TM.getTargetData(); + + if (CV->isNullValue()) { + O << "\t.space\t " << TD.getTypeSize(CV->getType()) << "\n"; + return; + } else if (const ConstantArray *CVA = dyn_cast(CV)) { + if (isStringCompatible(CVA)) { + O << ".ascii"; + printAsCString(O, CVA); + O << "\n"; + } else { // Not a string. Print the values in successive locations + const std::vector &constValues = CVA->getValues(); + for (unsigned i=0; i < constValues.size(); i++) + emitGlobalConstant(cast(constValues[i].get())); + } + return; + } else if (const ConstantStruct *CVS = dyn_cast(CV)) { + // Print the fields in successive locations. Pad to align if needed! + const StructLayout *cvsLayout = TD.getStructLayout(CVS->getType()); + const std::vector& constValues = CVS->getValues(); + unsigned sizeSoFar = 0; + for (unsigned i=0, N = constValues.size(); i < N; i++) { + const Constant* field = cast(constValues[i].get()); + + // Check if padding is needed and insert one or more 0s. + unsigned fieldSize = TD.getTypeSize(field->getType()); + unsigned padSize = ((i == N-1? cvsLayout->StructSize + : cvsLayout->MemberOffsets[i+1]) + - cvsLayout->MemberOffsets[i]) - fieldSize; + sizeSoFar += fieldSize + padSize; + + // Now print the actual field value + emitGlobalConstant(field); + + // Insert the field padding unless it's zero bytes... 
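// Illustrative sketch (not part of this patch): the struct padding arithmetic above,
// worked through for a struct such as { sbyte, int } laid out with the int at offset 4
// (MemberOffsets = {0, 4}, StructSize = 8):
//   field 0: fieldSize = 1, padSize = (4 - 0) - 1 = 3   -> "\t.space\t 3" follows it
//   field 1: fieldSize = 4, padSize = (8 - 4) - 4 = 0   -> no padding
static unsigned padAfterField(unsigned NextOffset, unsigned ThisOffset, unsigned FieldSize) {
  return (NextOffset - ThisOffset) - FieldSize;  // padAfterField(4,0,1)==3, padAfterField(8,4,4)==0
}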
+ if (padSize) + O << "\t.space\t " << padSize << "\n"; + } + assert(sizeSoFar == cvsLayout->StructSize && + "Layout of constant struct may be incorrect!"); + return; + } else if (const ConstantFP *CFP = dyn_cast(CV)) { + // FP Constants are printed as integer constants to avoid losing + // precision... + double Val = CFP->getValue(); + switch (CFP->getType()->getPrimitiveID()) { + default: assert(0 && "Unknown floating point type!"); + case Type::FloatTyID: { + union FU { // Abide by C TBAA rules + float FVal; + unsigned UVal; + } U; + U.FVal = Val; + O << ".long\t" << U.UVal << "\t# float " << Val << "\n"; + return; + } + case Type::DoubleTyID: { + union DU { // Abide by C TBAA rules + double FVal; + uint64_t UVal; + struct { + uint32_t MSWord; + uint32_t LSWord; + } T; + } U; + U.FVal = Val; + + O << ".long\t" << U.T.MSWord << "\t# double most significant word " << Val << "\n"; + O << ".long\t" << U.T.LSWord << "\t# double least significant word" << Val << "\n"; + return; + } + } + } else if (CV->getType()->getPrimitiveSize() == 64) { + const ConstantInt *CI = dyn_cast(CV); + if(CI) { + union DU { // Abide by C TBAA rules + int64_t UVal; + struct { + uint32_t MSWord; + uint32_t LSWord; + } T; + } U; + U.UVal = CI->getRawValue(); + + O << ".long\t" << U.T.MSWord << "\t# Double-word most significant word " << U.UVal << "\n"; + O << ".long\t" << U.T.LSWord << "\t# Double-word least significant word" << U.UVal << "\n"; + return; + } + } + + const Type *type = CV->getType(); + O << "\t"; + switch (type->getPrimitiveID()) { + case Type::UByteTyID: case Type::SByteTyID: + O << ".byte"; + break; + case Type::UShortTyID: case Type::ShortTyID: + O << ".short"; + break; + case Type::BoolTyID: + case Type::PointerTyID: + case Type::UIntTyID: case Type::IntTyID: + O << ".long"; + break; + case Type::ULongTyID: case Type::LongTyID: + assert (0 && "Should have already output double-word constant."); + case Type::FloatTyID: case Type::DoubleTyID: + assert (0 && "Should have already output floating point constant."); + default: + assert (0 && "Can't handle printing this type of thing"); + break; + } + O << "\t"; + emitConstantValueOnly(CV); + O << "\n"; +} + +/// printConstantPool - Print to the current output stream assembly +/// representations of the constants in the constant pool MCP. This is +/// used to print out constants which have been "spilled to memory" by +/// the code generator. +/// +void Printer::printConstantPool(MachineConstantPool *MCP) { + const std::vector &CP = MCP->getConstants(); + const TargetData &TD = TM.getTargetData(); + + if (CP.empty()) return; + + for (unsigned i = 0, e = CP.size(); i != e; ++i) { + O << "\t.const\n"; + O << "\t.align " << (unsigned)TD.getTypeAlignment(CP[i]->getType()) + << "\n"; + O << ".CPI" << CurrentFnName << "_" << i << ":\t\t\t\t\t#" + << *CP[i] << "\n"; + emitGlobalConstant(CP[i]); + } +} + +/// runOnMachineFunction - This uses the printMachineInstruction() +/// method to print assembly for each instruction. +/// +bool Printer::runOnMachineFunction(MachineFunction &MF) { + // BBNumber is used here so that a given Printer will never give two + // BBs the same name. (If you have a better way, please let me know!) + static unsigned BBNumber = 0; + + O << "\n\n"; + // What's my mangled name? + CurrentFnName = Mang->getValueName(MF.getFunction()); + + // Print out constants referenced by the function + printConstantPool(MF.getConstantPool()); + + // Print out labels for the function. 
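// Illustrative sketch (not part of this patch): what the union trick above produces
// for a concrete value.  IEEE-754 single-precision 1.0 has bit pattern 0x3F800000
// (1065353216), so a float global initialized to 1.0 is printed as
//   .long  1065353216   # float 1
// The reinterpretation in miniature:
static unsigned floatBits(float F) {
  union { float FVal; unsigned UVal; } U;   // abide by C TBAA rules, as above
  U.FVal = F;
  return U.UVal;                            // floatBits(1.0f) == 0x3F800000
}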
+ O << "\t.text\n"; + O << "\t.globl\t" << CurrentFnName << "\n"; + O << "\t.align 5\n"; + O << CurrentFnName << ":\n"; + + // Number each basic block so that we can consistently refer to them + // in PC-relative references. + NumberForBB.clear(); + for (MachineFunction::const_iterator I = MF.begin(), E = MF.end(); + I != E; ++I) { + NumberForBB[I->getBasicBlock()] = BBNumber++; + } + + // Print out code for the function. + for (MachineFunction::const_iterator I = MF.begin(), E = MF.end(); + I != E; ++I) { + // Print a label for the basic block. + O << "L" << NumberForBB[I->getBasicBlock()] << ":\t# " + << I->getBasicBlock()->getName() << "\n"; + for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end(); + II != E; ++II) { + // Print the assembly for the instruction. + O << "\t"; + printMachineInstruction(II); + } + } + + // We didn't modify anything. + return false; +} + + + +void Printer::printOp(const MachineOperand &MO, + bool elideOffsetKeyword /* = false */) { + const MRegisterInfo &RI = *TM.getRegisterInfo(); + int new_symbol; + + switch (MO.getType()) { + case MachineOperand::MO_VirtualRegister: + if (Value *V = MO.getVRegValueOrNull()) { + O << "<" << V->getName() << ">"; + return; + } + // FALLTHROUGH + case MachineOperand::MO_MachineRegister: + O << RI.get(MO.getReg()).Name; + return; + + case MachineOperand::MO_SignExtendedImmed: + case MachineOperand::MO_UnextendedImmed: + O << (int)MO.getImmedValue(); + return; + case MachineOperand::MO_MachineBasicBlock: { + MachineBasicBlock *MBBOp = MO.getMachineBasicBlock(); + O << ".LBB" << Mang->getValueName(MBBOp->getParent()->getFunction()) + << "_" << MBBOp->getNumber () << "\t# " + << MBBOp->getBasicBlock ()->getName (); + return; + } + case MachineOperand::MO_PCRelativeDisp: + std::cerr << "Shouldn't use addPCDisp() when building PPC MachineInstrs"; + abort (); + return; + case MachineOperand::MO_GlobalAddress: + if (!elideOffsetKeyword) { + if(isa(MO.getGlobal())) { + Stubs.insert(Mang->getValueName(MO.getGlobal())); + O << "L" << Mang->getValueName(MO.getGlobal()) << "$stub"; + } else { + O << Mang->getValueName(MO.getGlobal()); + } + } + return; + case MachineOperand::MO_ExternalSymbol: + O << MO.getSymbolName(); + return; + default: + O << ""; return; + } +} + +#if 0 +static inline +unsigned int ValidOpcodes(const MachineInstr *MI, unsigned int ArgType[5]) { + int i; + unsigned int retval = 1; + + for(i = 0; i<5; i++) { + switch(ArgType[i]) { + case none: + break; + case Gpr: + case Gpr0: + Type::UIntTy + case Simm16: + case Zimm16: + case PCRelimm24: + case Imm24: + case Imm5: + case PCRelimm14: + case Imm14: + case Imm2: + case Crf: + case Imm3: + case Imm1: + case Fpr: + case Imm4: + case Imm8: + case Disimm16: + case Spr: + case Sgr: + }; + + } + } +} +#endif + +/// printMachineInstruction -- Print out a single PPC32 LLVM instruction +/// MI in Darwin syntax to the current output stream. 
+/// +void Printer::printMachineInstruction(const MachineInstr *MI) { + unsigned Opcode = MI->getOpcode(); + const TargetInstrInfo &TII = *TM.getInstrInfo(); + const TargetInstrDescriptor &Desc = TII.get(Opcode); + unsigned int i; + + unsigned int ArgCount = Desc.TSFlags & PPC32II::ArgCountMask; + unsigned int ArgType[5]; + + + ArgType[0] = (Desc.TSFlags>>PPC32II::Arg0TypeShift) & PPC32II::ArgTypeMask; + ArgType[1] = (Desc.TSFlags>>PPC32II::Arg1TypeShift) & PPC32II::ArgTypeMask; + ArgType[2] = (Desc.TSFlags>>PPC32II::Arg2TypeShift) & PPC32II::ArgTypeMask; + ArgType[3] = (Desc.TSFlags>>PPC32II::Arg3TypeShift) & PPC32II::ArgTypeMask; + ArgType[4] = (Desc.TSFlags>>PPC32II::Arg4TypeShift) & PPC32II::ArgTypeMask; + + assert ( ((Desc.TSFlags & PPC32II::VMX) == 0) && "Instruction requires VMX support"); + assert ( ((Desc.TSFlags & PPC32II::PPC64) == 0) && "Instruction requires 64 bit support"); + //assert ( ValidOpcodes(MI, ArgType) && "Instruction has invalid inputs"); + ++EmittedInsts; + + if(Opcode == PPC32::MovePCtoLR) { + O << "mflr r0\n"; + O << "bcl 20,31,L" << CurrentFnName << "$pb\n"; + O << "L" << CurrentFnName << "$pb:\n"; + return; + } + + O << TII.getName(MI->getOpcode()) << " "; + std::cout << TII.getName(MI->getOpcode()) << " expects " << ArgCount << " args\n"; + + if(Opcode == PPC32::LOADLoAddr) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << ", lo16("; + printOp(MI->getOperand(2)); + O << "-L" << CurrentFnName << "$pb)\n"; + return; + } + + if(Opcode == PPC32::LOADHiAddr) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << ", ha16(" ; + printOp(MI->getOperand(2)); + O << "-L" << CurrentFnName << "$pb)\n"; + return; + } + + if( (ArgCount == 3) && (ArgType[1] == PPC32II::Disimm16) ) { + printOp(MI->getOperand(0)); + O << ", "; + printOp(MI->getOperand(1)); + O << "("; + if((ArgType[2] == PPC32II::Gpr0) && (MI->getOperand(2).getReg() == PPC32::R0)) { + O << "0"; + } else { + printOp(MI->getOperand(2)); + } + O << ")\n"; + } else { + for(i = 0; i< ArgCount; i++) { + if( (ArgType[i] == PPC32II::Gpr0) && ((MI->getOperand(i).getReg()) == PPC32::R0)) { + O << "0"; + } else { + //std::cout << "DEBUG " << (*(TM.getRegisterInfo())).get(MI->getOperand(i).getReg()).Name << "\n"; + printOp(MI->getOperand(i)); + } + if( ArgCount - 1 == i) { + O << "\n"; + } else { + O << ", "; + } + } + } + + return; +} + +bool Printer::doInitialization(Module &M) { + // Tell gas we are outputting Intel syntax (not AT&T syntax) assembly. + // + // Bug: gas in `intel_syntax noprefix' mode interprets the symbol `Sp' in an + // instruction as a reference to the register named sp, and if you try to + // reference a symbol `Sp' (e.g. `mov ECX, OFFSET Sp') then it gets lowercased + // before being looked up in the symbol table. This creates spurious + // `undefined symbol' errors when linking. Workaround: Do not use `noprefix' + // mode, and decorate all register names with percent signs. + // O << "\t.intel_syntax\n"; + Mang = new Mangler(M, true); + return false; // success +} + +// SwitchSection - Switch to the specified section of the executable if we are +// not already in it! 
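// Illustrative sketch (not part of this patch): two PowerPC conventions the printing
// code above relies on.  First, in D-form instructions an rA field of zero means the
// literal value 0, not the contents of r0, which is why Gpr0 operands holding R0 are
// printed as "0".  Second, the lo16()/ha16() pair used for the picbase-relative
// addresses works because ha16 is the "high adjusted" half: it pre-compensates for the
// sign extension of the low half when addis and a D-form offset are recombined.
static unsigned lo16(unsigned Addr) { return Addr & 0xFFFF; }
static unsigned ha16(unsigned Addr) { return ((Addr + 0x8000) >> 16) & 0xFFFF; }
// Recombining (ha16(A) << 16) with the sign-extended lo16(A) gives back A, mod 2^32.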
+// +static void SwitchSection(std::ostream &OS, std::string &CurSection, + const char *NewSection) { + if (CurSection != NewSection) { + CurSection = NewSection; + if (!CurSection.empty()) + OS << "\t" << NewSection << "\n"; + } +} + +bool Printer::doFinalization(Module &M) { + const TargetData &TD = TM.getTargetData(); + std::string CurSection; + + // Print out module-level global variables here. + for (Module::const_giterator I = M.gbegin(), E = M.gend(); I != E; ++I) + if (I->hasInitializer()) { // External global require no code + O << "\n\n"; + std::string name = Mang->getValueName(I); + Constant *C = I->getInitializer(); + unsigned Size = TD.getTypeSize(C->getType()); + unsigned Align = TD.getTypeAlignment(C->getType()); + + if (C->isNullValue() && + (I->hasLinkOnceLinkage() || I->hasInternalLinkage() || + I->hasWeakLinkage() /* FIXME: Verify correct */)) { + SwitchSection(O, CurSection, ".data"); + if (I->hasInternalLinkage()) + O << "\t.local " << name << "\n"; + + O << "\t.comm " << name << "," << TD.getTypeSize(C->getType()) + << "," << (unsigned)TD.getTypeAlignment(C->getType()); + O << "\t\t# "; + WriteAsOperand(O, I, true, true, &M); + O << "\n"; + } else { + switch (I->getLinkage()) { + case GlobalValue::LinkOnceLinkage: + case GlobalValue::WeakLinkage: // FIXME: Verify correct for weak. + // Nonnull linkonce -> weak + O << "\t.weak " << name << "\n"; + SwitchSection(O, CurSection, ""); + O << "\t.section\t.llvm.linkonce.d." << name << ",\"aw\",@progbits\n"; + break; + + case GlobalValue::AppendingLinkage: + // FIXME: appending linkage variables should go into a section of + // their name or something. For now, just emit them as external. + case GlobalValue::ExternalLinkage: + // If external or appending, declare as a global symbol + O << "\t.globl " << name << "\n"; + // FALL THROUGH + case GlobalValue::InternalLinkage: + if (C->isNullValue()) + SwitchSection(O, CurSection, ".bss"); + else + SwitchSection(O, CurSection, ".data"); + break; + } + + O << "\t.align " << Align << "\n"; + O << name << ":\t\t\t\t# "; + WriteAsOperand(O, I, true, true, &M); + O << " = "; + WriteAsOperand(O, C, false, false, &M); + O << "\n"; + emitGlobalConstant(C); + } + } + + for(std::set::iterator i = Stubs.begin(); i != Stubs.end(); ++i) { + O << ".data\n"; + O << ".section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32\n"; + O << "\t.align 2\n"; + O << "L" << *i << "$stub:\n"; + O << "\t.indirect_symbol " << *i << "\n"; + O << "\tmflr r0\n"; + O << "\tbcl 20,31,L0$" << *i << "\n"; + O << "L0$" << *i << ":\n"; + O << "\tmflr r11\n"; + O << "\taddis r11,r11,ha16(L" << *i << "$lazy_ptr-L0$" << *i << ")\n"; + O << "\tmtlr r0\n"; + O << "\tlwzu r12,lo16(L" << *i << "$lazy_ptr-L0$" << *i << ")(r11)\n"; + O << "\tmtctr r12\n"; + O << "\tbctr\n"; + O << ".data\n"; + O << ".lazy_symbol_pointer\n"; + O << "L" << *i << "$lazy_ptr:\n"; + O << ".indirect_symbol " << *i << "\n"; + O << ".long dyld_stub_binding_helper\n"; + + } + + delete Mang; + return false; // success +} + +} // End llvm namespace diff --git a/lib/Target/PowerPC/PowerPCCodeEmitter.cpp b/lib/Target/PowerPC/PowerPCCodeEmitter.cpp new file mode 100644 index 00000000000..3c423e5cef6 --- /dev/null +++ b/lib/Target/PowerPC/PowerPCCodeEmitter.cpp @@ -0,0 +1,43 @@ +//===-- PowerPCCodeEmitter.cpp - JIT Code Emitter for PowerPC -----*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// +//===----------------------------------------------------------------------===// + +#include "PowerPCTargetMachine.h" + +namespace llvm { + +/// addPassesToEmitMachineCode - Add passes to the specified pass manager to get +/// machine code emitted. This uses a MachineCodeEmitter object to handle +/// actually outputting the machine code and resolving things like the address +/// of functions. This method should returns true if machine code emission is +/// not supported. +/// +bool PowerPCTargetMachine::addPassesToEmitMachineCode(FunctionPassManager &PM, + MachineCodeEmitter &MCE) { + return true; + // It should go something like this: + // PM.add(new Emitter(MCE)); // Machine code emitter pass for PowerPC + // Delete machine code for this function after emitting it: + // PM.add(createMachineCodeDeleter()); +} + +void *PowerPCJITInfo::getJITStubForFunction(Function *F, + MachineCodeEmitter &MCE) { + assert (0 && "PowerPCJITInfo::getJITStubForFunction not implemented"); + return 0; +} + +void PowerPCJITInfo::replaceMachineCodeForFunction (void *Old, void *New) { + assert (0 && "PowerPCJITInfo::replaceMachineCodeForFunction not implemented"); +} + +} // end llvm namespace + diff --git a/lib/Target/PowerPC/PowerPCISelSimple.cpp b/lib/Target/PowerPC/PowerPCISelSimple.cpp new file mode 100644 index 00000000000..c9c0e2d870a --- /dev/null +++ b/lib/Target/PowerPC/PowerPCISelSimple.cpp @@ -0,0 +1,2621 @@ +//===-- InstSelectSimple.cpp - A simple instruction selector for PowerPC --===// +// +// The LLVM Compiler Infrastructure +// +// This file was developed by the LLVM research group and is distributed under +// the University of Illinois Open Source License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "PowerPC.h" +#include "PowerPCInstrBuilder.h" +#include "PowerPCInstrInfo.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/Instructions.h" +#include "llvm/IntrinsicLowering.h" +#include "llvm/Pass.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/SSARegMap.h" +#include "llvm/Target/MRegisterInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/InstVisitor.h" +using namespace llvm; + +namespace { + /// TypeClass - Used by the PowerPC backend to group LLVM types by their basic PPC + /// Representation. + /// + enum TypeClass { + cByte, cShort, cInt, cFP, cLong + }; +} + +/// getClass - Turn a primitive type into a "class" number which is based on the +/// size of the type, and whether or not it is floating point. 
+/// +static inline TypeClass getClass(const Type *Ty) { + switch (Ty->getPrimitiveID()) { + case Type::SByteTyID: + case Type::UByteTyID: return cByte; // Byte operands are class #0 + case Type::ShortTyID: + case Type::UShortTyID: return cShort; // Short operands are class #1 + case Type::IntTyID: + case Type::UIntTyID: + case Type::PointerTyID: return cInt; // Int's and pointers are class #2 + + case Type::FloatTyID: + case Type::DoubleTyID: return cFP; // Floating Point is #3 + + case Type::LongTyID: + case Type::ULongTyID: return cLong; // Longs are class #4 + default: + assert(0 && "Invalid type to getClass!"); + return cByte; // not reached + } +} + +// getClassB - Just like getClass, but treat boolean values as ints. +static inline TypeClass getClassB(const Type *Ty) { + if (Ty == Type::BoolTy) return cInt; + return getClass(Ty); +} + +namespace { + struct ISel : public FunctionPass, InstVisitor { + TargetMachine &TM; + MachineFunction *F; // The function we are compiling into + MachineBasicBlock *BB; // The current MBB we are compiling + int VarArgsFrameIndex; // FrameIndex for start of varargs area + int ReturnAddressIndex; // FrameIndex for the return address + + std::map RegMap; // Mapping between Val's and SSA Regs + + // MBBMap - Mapping between LLVM BB -> Machine BB + std::map MBBMap; + + // AllocaMap - Mapping from fixed sized alloca instructions to the + // FrameIndex for the alloca. + std::map AllocaMap; + + ISel(TargetMachine &tm) : TM(tm), F(0), BB(0) {} + + /// runOnFunction - Top level implementation of instruction selection for + /// the entire function. + /// + bool runOnFunction(Function &Fn) { + // First pass over the function, lower any unknown intrinsic functions + // with the IntrinsicLowering class. + LowerUnknownIntrinsicFunctionCalls(Fn); + + F = &MachineFunction::construct(&Fn, TM); + + // Create all of the machine basic blocks for the function... + for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I) + F->getBasicBlockList().push_back(MBBMap[I] = new MachineBasicBlock(I)); + + BB = &F->front(); + + // Set up a frame object for the return address. This is used by the + // llvm.returnaddress & llvm.frameaddress intrinisics. + ReturnAddressIndex = F->getFrameInfo()->CreateFixedObject(4, -4); + + // Copy incoming arguments off of the stack... + LoadArgumentsToVirtualRegs(Fn); + + // Instruction select everything except PHI nodes + visit(Fn); + + // Select the PHI nodes + SelectPHINodes(); + + RegMap.clear(); + MBBMap.clear(); + AllocaMap.clear(); + F = 0; + // We always build a machine code representation for the function + return true; + } + + virtual const char *getPassName() const { + return "PowerPC Simple Instruction Selection"; + } + + /// visitBasicBlock - This method is called when we are visiting a new basic + /// block. This simply creates a new MachineBasicBlock to emit code into + /// and adds it to the current MachineFunction. Subsequent visit* for + /// instructions will be invoked for all instructions in the basic block. + /// + void visitBasicBlock(BasicBlock &LLVM_BB) { + BB = MBBMap[&LLVM_BB]; + } + + /// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the + /// function, lowering any calls to unknown intrinsic functions into the + /// equivalent LLVM code. + /// + void LowerUnknownIntrinsicFunctionCalls(Function &F); + + /// LoadArgumentsToVirtualRegs - Load all of the arguments to this function + /// from the stack into virtual registers. 
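// Illustrative sketch (not part of this patch): the class numbering above, spelled
// out.  Booleans are folded into cInt by getClassB so that they travel in GPRs.
#include <cassert>
static void checkTypeClasses() {
  assert(getClass(Type::SByteTy)  == cByte);   // 0: byte-sized integers
  assert(getClass(Type::UShortTy) == cShort);  // 1: halfword integers
  assert(getClass(Type::IntTy)    == cInt);    // 2: words (and pointers)
  assert(getClass(Type::DoubleTy) == cFP);     // 3: float and double
  assert(getClass(Type::ULongTy)  == cLong);   // 4: 64-bit integers
  assert(getClassB(Type::BoolTy)  == cInt);    // bool is treated as an int
}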
+ /// + void LoadArgumentsToVirtualRegs(Function &F); + + /// SelectPHINodes - Insert machine code to generate phis. This is tricky + /// because we have to generate our sources into the source basic blocks, + /// not the current one. + /// + void SelectPHINodes(); + + // Visitation methods for various instructions. These methods simply emit + // fixed PowerPC code for each instruction. + + // Control flow operators + void visitReturnInst(ReturnInst &RI); + void visitBranchInst(BranchInst &BI); + + struct ValueRecord { + Value *Val; + unsigned Reg; + const Type *Ty; + ValueRecord(unsigned R, const Type *T) : Val(0), Reg(R), Ty(T) {} + ValueRecord(Value *V) : Val(V), Reg(0), Ty(V->getType()) {} + }; + void doCall(const ValueRecord &Ret, MachineInstr *CallMI, + const std::vector &Args); + void visitCallInst(CallInst &I); + void visitIntrinsicCall(Intrinsic::ID ID, CallInst &I); + + // Arithmetic operators + void visitSimpleBinary(BinaryOperator &B, unsigned OpcodeClass); + void visitAdd(BinaryOperator &B) { visitSimpleBinary(B, 0); } + void visitSub(BinaryOperator &B) { visitSimpleBinary(B, 1); } + void visitMul(BinaryOperator &B); + + void visitDiv(BinaryOperator &B) { visitDivRem(B); } + void visitRem(BinaryOperator &B) { visitDivRem(B); } + void visitDivRem(BinaryOperator &B); + + // Bitwise operators + void visitAnd(BinaryOperator &B) { visitSimpleBinary(B, 2); } + void visitOr (BinaryOperator &B) { visitSimpleBinary(B, 3); } + void visitXor(BinaryOperator &B) { visitSimpleBinary(B, 4); } + + // Comparison operators... + void visitSetCondInst(SetCondInst &I); + unsigned EmitComparison(unsigned OpNum, Value *Op0, Value *Op1, + MachineBasicBlock *MBB, + MachineBasicBlock::iterator MBBI); + void visitSelectInst(SelectInst &SI); + + + // Memory Instructions + void visitLoadInst(LoadInst &I); + void visitStoreInst(StoreInst &I); + void visitGetElementPtrInst(GetElementPtrInst &I); + void visitAllocaInst(AllocaInst &I); + void visitMallocInst(MallocInst &I); + void visitFreeInst(FreeInst &I); + + // Other operators + void visitShiftInst(ShiftInst &I); + void visitPHINode(PHINode &I) {} // PHI nodes handled by second pass + void visitCastInst(CastInst &I); + void visitVANextInst(VANextInst &I); + void visitVAArgInst(VAArgInst &I); + + void visitInstruction(Instruction &I) { + std::cerr << "Cannot instruction select: " << I; + abort(); + } + + /// promote32 - Make a value 32-bits wide, and put it somewhere. + /// + void promote32(unsigned targetReg, const ValueRecord &VR); + + /// emitGEPOperation - Common code shared between visitGetElementPtrInst and + /// constant expression GEP support. + /// + void emitGEPOperation(MachineBasicBlock *BB, MachineBasicBlock::iterator IP, + Value *Src, User::op_iterator IdxBegin, + User::op_iterator IdxEnd, unsigned TargetReg); + + /// emitCastOperation - Common code shared between visitCastInst and + /// constant expression cast support. + /// + void emitCastOperation(MachineBasicBlock *BB,MachineBasicBlock::iterator IP, + Value *Src, const Type *DestTy, unsigned TargetReg); + + /// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary + /// and constant expression support. + /// + void emitSimpleBinaryOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, + unsigned OperatorClass, unsigned TargetReg); + + /// emitBinaryFPOperation - This method handles emission of floating point + /// Add (0), Sub (1), Mul (2), and Div (3) operations. 
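// Illustrative sketch (not part of this patch): the two small "operator class"
// numberings used above.  visitSimpleBinary / emitSimpleBinaryOperation use
// Add=0, Sub=1, And=2, Or=3, Xor=4 (the same order the "++Class" fall-through in
// copyConstantToRegister later reproduces), while emitBinaryFPOperation uses its own
// Add=0, Sub=1, Mul=2, Div=3 numbering.  Hypothetical enum names, same values.
enum SimpleBinaryClass { SBAdd = 0, SBSub = 1, SBAnd = 2, SBOr = 3, SBXor = 4 };
enum BinaryFPClass     { FPAdd = 0, FPSub = 1, FPMul = 2, FPDiv = 3 };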
+ void emitBinaryFPOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, + unsigned OperatorClass, unsigned TargetReg); + + void emitMultiply(MachineBasicBlock *BB, MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, unsigned TargetReg); + + void doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI, + unsigned DestReg, const Type *DestTy, + unsigned Op0Reg, unsigned Op1Reg); + void doMultiplyConst(MachineBasicBlock *MBB, + MachineBasicBlock::iterator MBBI, + unsigned DestReg, const Type *DestTy, + unsigned Op0Reg, unsigned Op1Val); + + void emitDivRemOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, bool isDiv, + unsigned TargetReg); + + /// emitSetCCOperation - Common code shared between visitSetCondInst and + /// constant expression support. + /// + void emitSetCCOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, unsigned Opcode, + unsigned TargetReg); + + /// emitShiftOperation - Common code shared between visitShiftInst and + /// constant expression support. + /// + void emitShiftOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Op, Value *ShiftAmount, bool isLeftShift, + const Type *ResultTy, unsigned DestReg); + + /// emitSelectOperation - Common code shared between visitSelectInst and the + /// constant expression support. + void emitSelectOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Cond, Value *TrueVal, Value *FalseVal, + unsigned DestReg); + + /// copyConstantToRegister - Output the instructions required to put the + /// specified constant into the specified register. + /// + void copyConstantToRegister(MachineBasicBlock *MBB, + MachineBasicBlock::iterator MBBI, + Constant *C, unsigned Reg); + + void emitUCOM(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI, + unsigned LHS, unsigned RHS); + + /// makeAnotherReg - This method returns the next register number we haven't + /// yet used. + /// + /// Long values are handled somewhat specially. They are always allocated + /// as pairs of 32 bit integer values. The register number returned is the + /// lower 32 bits of the long value, and the regNum+1 is the upper 32 bits + /// of the long value. + /// + unsigned makeAnotherReg(const Type *Ty) { + assert(dynamic_cast(TM.getRegisterInfo()) && + "Current target doesn't have PPC reg info??"); + const PowerPCRegisterInfo *MRI = + static_cast(TM.getRegisterInfo()); + if (Ty == Type::LongTy || Ty == Type::ULongTy) { + const TargetRegisterClass *RC = MRI->getRegClassForType(Type::IntTy); + // Create the lower part + F->getSSARegMap()->createVirtualRegister(RC); + // Create the upper part. + return F->getSSARegMap()->createVirtualRegister(RC)-1; + } + + // Add the mapping of regnumber => reg class to MachineFunction + const TargetRegisterClass *RC = MRI->getRegClassForType(Ty); + return F->getSSARegMap()->createVirtualRegister(RC); + } + + /// getReg - This method turns an LLVM value into a register number. + /// + unsigned getReg(Value &V) { return getReg(&V); } // Allow references + unsigned getReg(Value *V) { + // Just append to the end of the current bb. + MachineBasicBlock::iterator It = BB->end(); + return getReg(V, BB, It); + } + unsigned getReg(Value *V, MachineBasicBlock *MBB, + MachineBasicBlock::iterator IPt); + + /// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca + /// that is to be statically allocated with the initial stack frame + /// adjustment. 
+ unsigned getFixedSizedAllocaFI(AllocaInst *AI); + }; +} + +/// dyn_castFixedAlloca - If the specified value is a fixed size alloca +/// instruction in the entry block, return it. Otherwise, return a null +/// pointer. +static AllocaInst *dyn_castFixedAlloca(Value *V) { + if (AllocaInst *AI = dyn_cast(V)) { + BasicBlock *BB = AI->getParent(); + if (isa(AI->getArraySize()) && BB ==&BB->getParent()->front()) + return AI; + } + return 0; +} + +/// getReg - This method turns an LLVM value into a register number. +/// +unsigned ISel::getReg(Value *V, MachineBasicBlock *MBB, + MachineBasicBlock::iterator IPt) { + // If this operand is a constant, emit the code to copy the constant into + // the register here... + // + if (Constant *C = dyn_cast(V)) { + unsigned Reg = makeAnotherReg(V->getType()); + copyConstantToRegister(MBB, IPt, C, Reg); + return Reg; + } else if (GlobalValue *GV = dyn_cast(V)) { + unsigned Reg1 = makeAnotherReg(V->getType()); + unsigned Reg2 = makeAnotherReg(V->getType()); + // Move the address of the global into the register + BuildMI(*MBB, IPt, PPC32::LOADHiAddr, 2, Reg1).addReg(PPC32::R0).addGlobalAddress(GV); + BuildMI(*MBB, IPt, PPC32::LOADLoAddr, 2, Reg2).addReg(Reg1).addGlobalAddress(GV); + return Reg2; + } else if (CastInst *CI = dyn_cast(V)) { + // Do not emit noop casts at all. + if (getClassB(CI->getType()) == getClassB(CI->getOperand(0)->getType())) + return getReg(CI->getOperand(0), MBB, IPt); + } else if (AllocaInst *AI = dyn_castFixedAlloca(V)) { + unsigned Reg = makeAnotherReg(V->getType()); + unsigned FI = getFixedSizedAllocaFI(AI); + addFrameReference(BuildMI(*MBB, IPt, PPC32::ADDI, 2, Reg), FI, 0, false); + return Reg; + } + + unsigned &Reg = RegMap[V]; + if (Reg == 0) { + Reg = makeAnotherReg(V->getType()); + RegMap[V] = Reg; + } + + return Reg; +} + +/// getFixedSizedAllocaFI - Return the frame index for a fixed sized alloca +/// that is to be statically allocated with the initial stack frame +/// adjustment. +unsigned ISel::getFixedSizedAllocaFI(AllocaInst *AI) { + // Already computed this? + std::map::iterator I = AllocaMap.lower_bound(AI); + if (I != AllocaMap.end() && I->first == AI) return I->second; + + const Type *Ty = AI->getAllocatedType(); + ConstantUInt *CUI = cast(AI->getArraySize()); + unsigned TySize = TM.getTargetData().getTypeSize(Ty); + TySize *= CUI->getValue(); // Get total allocated size... + unsigned Alignment = TM.getTargetData().getTypeAlignment(Ty); + + // Create a new stack object using the frame manager... + int FrameIdx = F->getFrameInfo()->CreateStackObject(TySize, Alignment); + AllocaMap.insert(I, std::make_pair(AI, FrameIdx)); + return FrameIdx; +} + + +/// copyConstantToRegister - Output the instructions required to put the +/// specified constant into the specified register. 
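// Illustrative sketch (not part of this patch): the size computation in
// getFixedSizedAllocaFI above, traced for "alloca int, uint 10" assuming the 4-byte
// int size and alignment this target uses: TySize = 4 * 10 = 40 bytes, Alignment = 4,
// so one 40-byte stack object is created, and the AllocaMap entry makes every later
// query for the same alloca return the same frame index.
static unsigned allocaBytes(unsigned ElemSize, unsigned NumElems) {
  return ElemSize * NumElems;   // allocaBytes(4, 10) == 40
}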
+/// +void ISel::copyConstantToRegister(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Constant *C, unsigned R) { + if (ConstantExpr *CE = dyn_cast(C)) { + unsigned Class = 0; + switch (CE->getOpcode()) { + case Instruction::GetElementPtr: + emitGEPOperation(MBB, IP, CE->getOperand(0), + CE->op_begin()+1, CE->op_end(), R); + return; + case Instruction::Cast: + emitCastOperation(MBB, IP, CE->getOperand(0), CE->getType(), R); + return; + + case Instruction::Xor: ++Class; // FALL THROUGH + case Instruction::Or: ++Class; // FALL THROUGH + case Instruction::And: ++Class; // FALL THROUGH + case Instruction::Sub: ++Class; // FALL THROUGH + case Instruction::Add: + emitSimpleBinaryOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + Class, R); + return; + + case Instruction::Mul: + emitMultiply(MBB, IP, CE->getOperand(0), CE->getOperand(1), R); + return; + + case Instruction::Div: + case Instruction::Rem: + emitDivRemOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + CE->getOpcode() == Instruction::Div, R); + return; + + case Instruction::SetNE: + case Instruction::SetEQ: + case Instruction::SetLT: + case Instruction::SetGT: + case Instruction::SetLE: + case Instruction::SetGE: + emitSetCCOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + CE->getOpcode(), R); + return; + + case Instruction::Shl: + case Instruction::Shr: + emitShiftOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + CE->getOpcode() == Instruction::Shl, CE->getType(), R); + return; + + case Instruction::Select: + emitSelectOperation(MBB, IP, CE->getOperand(0), CE->getOperand(1), + CE->getOperand(2), R); + return; + + default: + std::cerr << "Offending expr: " << C << "\n"; + assert(0 && "Constant expression not yet handled!\n"); + } + } + + if (C->getType()->isIntegral()) { + unsigned Class = getClassB(C->getType()); + + if (Class == cLong) { + // Copy the value into the register pair. + uint64_t Val = cast(C)->getRawValue(); + unsigned hiTmp = makeAnotherReg(Type::IntTy); + unsigned loTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::ADDIS, 2, loTmp).addReg(PPC32::R0).addImm(Val >> 48); + BuildMI(*MBB, IP, PPC32::ORI, 2, R).addReg(loTmp).addImm((Val >> 32) & 0xFFFF); + BuildMI(*MBB, IP, PPC32::ADDIS, 2, hiTmp).addReg(PPC32::R0).addImm((Val >> 16) & 0xFFFF); + BuildMI(*MBB, IP, PPC32::ORI, 2, R+1).addReg(hiTmp).addImm(Val & 0xFFFF); + return; + } + + assert(Class <= cInt && "Type not handled yet!"); + + if (C->getType() == Type::BoolTy) { + BuildMI(*MBB, IP, PPC32::ADDI, 2, R).addReg(PPC32::R0).addImm(C == ConstantBool::True); + } else if (Class == cByte || Class == cShort) { + ConstantInt *CI = cast(C); + BuildMI(*MBB, IP, PPC32::ADDI, 2, R).addReg(PPC32::R0).addImm(CI->getRawValue()); + } else { + ConstantInt *CI = cast(C); + int TheVal = CI->getRawValue() & 0xFFFFFFFF; + if (TheVal < 32768 && TheVal >= -32768) { + BuildMI(*MBB, IP, PPC32::ADDI, 2, R).addReg(PPC32::R0).addImm(CI->getRawValue()); + } else { + unsigned TmpReg = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::ADDIS, 2, TmpReg).addReg(PPC32::R0).addImm(CI->getRawValue() >> 16); + BuildMI(*MBB, IP, PPC32::ORI, 2, R).addReg(TmpReg).addImm(CI->getRawValue() & 0xFFFF); + } + } + } else if (ConstantFP *CFP = dyn_cast(C)) { + // We need to spill the constant to memory... 
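// Illustrative sketch (not part of this patch): the integer materialization above,
// worked through.  A value in [-32768, 32767] fits the signed 16-bit immediate of
// addi, so 42 becomes "addi rD, 0, 42".  Anything wider is split into halves, so
// 0x12345678 becomes
//   addis rT, 0, 0x1234     ; rT = 0x12340000
//   ori   rD, rT, 0x5678    ; rD = 0x12345678
// (ori zero-extends its immediate, so no sign fix-up is needed for the low half).
static bool fitsInSImm16(int V) { return V >= -32768 && V <= 32767; }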
+ MachineConstantPool *CP = F->getConstantPool(); + unsigned CPI = CP->getConstantPoolIndex(CFP); + const Type *Ty = CFP->getType(); + + assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!"); + unsigned LoadOpcode = Ty == Type::FloatTy ? PPC32::LFS : PPC32::LFD; + addConstantPoolReference(BuildMI(*MBB, IP, LoadOpcode, 2, R), CPI); + } else if (isa(C)) { + // Copy zero (null pointer) to the register. + BuildMI(*MBB, IP, PPC32::ADDI, 2, R).addReg(PPC32::R0).addImm(0); + } else if (ConstantPointerRef *CPR = dyn_cast(C)) { + BuildMI(*MBB, IP, PPC32::ADDIS, 2, R).addReg(PPC32::R0).addGlobalAddress(CPR->getValue()); + BuildMI(*MBB, IP, PPC32::ORI, 2, R).addReg(PPC32::R0).addGlobalAddress(CPR->getValue()); + } else { + std::cerr << "Offending constant: " << C << "\n"; + assert(0 && "Type not handled yet!"); + } +} + +/// LoadArgumentsToVirtualRegs - Load all of the arguments to this function from +/// the stack into virtual registers. +/// +/// FIXME: When we can calculate which args are coming in via registers +/// source them from there instead. +void ISel::LoadArgumentsToVirtualRegs(Function &Fn) { + unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot + unsigned GPR_remaining = 8; + unsigned FPR_remaining = 13; + unsigned GPR_idx = 3; + unsigned FPR_idx = 1; + + MachineFrameInfo *MFI = F->getFrameInfo(); + + for (Function::aiterator I = Fn.abegin(), E = Fn.aend(); I != E; ++I) { + bool ArgLive = !I->use_empty(); + unsigned Reg = ArgLive ? getReg(*I) : 0; + int FI; // Frame object index + + switch (getClassB(I->getType())) { + case cByte: + if (ArgLive) { + FI = MFI->CreateFixedObject(1, ArgOffset); + if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, Reg).addReg(PPC32::R0+GPR_idx).addReg(PPC32::R0+GPR_idx); + } else { + addFrameReference(BuildMI(BB, PPC32::LBZ, 2, Reg), FI); + } + } + break; + case cShort: + if (ArgLive) { + FI = MFI->CreateFixedObject(2, ArgOffset); + if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, Reg).addReg(PPC32::R0+GPR_idx).addReg(PPC32::R0+GPR_idx); + } else { + addFrameReference(BuildMI(BB, PPC32::LHZ, 2, Reg), FI); + } + } + break; + case cInt: + if (ArgLive) { + FI = MFI->CreateFixedObject(4, ArgOffset); + if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, Reg).addReg(PPC32::R0+GPR_idx).addReg(PPC32::R0+GPR_idx); + } else { + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, Reg), FI); + } + } + break; + case cLong: + if (ArgLive) { + FI = MFI->CreateFixedObject(8, ArgOffset); + if (GPR_remaining > 1) { + BuildMI(BB, PPC32::OR, 2, Reg).addReg(PPC32::R0+GPR_idx).addReg(PPC32::R0+GPR_idx); + BuildMI(BB, PPC32::OR, 2, Reg+1).addReg(PPC32::R0+GPR_idx+1).addReg(PPC32::R0+GPR_idx+1); + } else { + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, Reg), FI); + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, Reg+1), FI, 4); + } + } + ArgOffset += 4; // longs require 4 additional bytes + if (GPR_remaining > 1) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + break; + case cFP: + if (ArgLive) { + unsigned Opcode; + if (I->getType() == Type::FloatTy) { + Opcode = PPC32::LFS; + FI = MFI->CreateFixedObject(4, ArgOffset); + } else { + Opcode = PPC32::LFD; + FI = MFI->CreateFixedObject(8, ArgOffset); + } + if (FPR_remaining > 0) { + BuildMI(BB, PPC32::FMR, 1, Reg).addReg(PPC32::F0+FPR_idx); + FPR_remaining--; + FPR_idx++; + } else { + addFrameReference(BuildMI(BB, Opcode, 2, Reg), FI); + } + } + if (I->getType() == Type::DoubleTy) { + ArgOffset += 4; // doubles require 4 additional bytes + if (GPR_remaining > 0) { + GPR_remaining--; // uses up 2 
GPRs + GPR_idx++; + } + } + break; + default: + assert(0 && "Unhandled argument type!"); + } + ArgOffset += 4; // Each argument takes at least 4 bytes on the stack... + if (GPR_remaining > 0) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + } + + // If the function takes variable number of arguments, add a frame offset for + // the start of the first vararg value... this is used to expand + // llvm.va_start. + if (Fn.getFunctionType()->isVarArg()) + VarArgsFrameIndex = MFI->CreateFixedObject(1, ArgOffset); +} + + +/// SelectPHINodes - Insert machine code to generate phis. This is tricky +/// because we have to generate our sources into the source basic blocks, not +/// the current one. +/// +void ISel::SelectPHINodes() { + const TargetInstrInfo &TII = *TM.getInstrInfo(); + const Function &LF = *F->getFunction(); // The LLVM function... + for (Function::const_iterator I = LF.begin(), E = LF.end(); I != E; ++I) { + const BasicBlock *BB = I; + MachineBasicBlock &MBB = *MBBMap[I]; + + // Loop over all of the PHI nodes in the LLVM basic block... + MachineBasicBlock::iterator PHIInsertPoint = MBB.begin(); + for (BasicBlock::const_iterator I = BB->begin(); + PHINode *PN = const_cast(dyn_cast(I)); ++I) { + + // Create a new machine instr PHI node, and insert it. + unsigned PHIReg = getReg(*PN); + MachineInstr *PhiMI = BuildMI(MBB, PHIInsertPoint, + PPC32::PHI, PN->getNumOperands(), PHIReg); + + MachineInstr *LongPhiMI = 0; + if (PN->getType() == Type::LongTy || PN->getType() == Type::ULongTy) + LongPhiMI = BuildMI(MBB, PHIInsertPoint, + PPC32::PHI, PN->getNumOperands(), PHIReg+1); + + // PHIValues - Map of blocks to incoming virtual registers. We use this + // so that we only initialize one incoming value for a particular block, + // even if the block has multiple entries in the PHI node. + // + std::map PHIValues; + + for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { + MachineBasicBlock *PredMBB = MBBMap[PN->getIncomingBlock(i)]; + unsigned ValReg; + std::map::iterator EntryIt = + PHIValues.lower_bound(PredMBB); + + if (EntryIt != PHIValues.end() && EntryIt->first == PredMBB) { + // We already inserted an initialization of the register for this + // predecessor. Recycle it. + ValReg = EntryIt->second; + + } else { + // Get the incoming value into a virtual register. + // + Value *Val = PN->getIncomingValue(i); + + // If this is a constant or GlobalValue, we may have to insert code + // into the basic block to compute it into a virtual register. + if ((isa(Val) && !isa(Val)) || + isa(Val)) { + // Simple constants get emitted at the end of the basic block, + // before any terminator instructions. We "know" that the code to + // move a constant into a register will never clobber any flags. + ValReg = getReg(Val, PredMBB, PredMBB->getFirstTerminator()); + } else { + // Because we don't want to clobber any values which might be in + // physical registers with the computation of this constant (which + // might be arbitrarily complex if it is a constant expression), + // just insert the computation at the top of the basic block. + MachineBasicBlock::iterator PI = PredMBB->begin(); + + // Skip over any PHI nodes though! 
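// Illustrative sketch (not part of this patch): the incoming-argument bookkeeping in
// LoadArgumentsToVirtualRegs above, traced for a function like
//   int f(int a, double b, int c)
// under the Darwin-style convention it models (integer args start at r3, FP args at
// f1, and a double also shadows two GPRs plus 4 extra bytes of the argument area):
//   a : copied from r3,  fixed stack slot at offset  0
//   b : copied from f1,  fixed stack slot at offset  4   (shadows r4 and r5)
//   c : copied from r6,  fixed stack slot at offset 12
struct ArgRegState {                     // mirrors the counters used above
  unsigned GPR_idx, FPR_idx, GPR_remaining, FPR_remaining;
  ArgRegState() : GPR_idx(3), FPR_idx(1), GPR_remaining(8), FPR_remaining(13) {}
};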
+ while (PI != PredMBB->end() && PI->getOpcode() == PPC32::PHI) + ++PI; + + ValReg = getReg(Val, PredMBB, PI); + } + + // Remember that we inserted a value for this PHI for this predecessor + PHIValues.insert(EntryIt, std::make_pair(PredMBB, ValReg)); + } + + PhiMI->addRegOperand(ValReg); + PhiMI->addMachineBasicBlockOperand(PredMBB); + if (LongPhiMI) { + LongPhiMI->addRegOperand(ValReg+1); + LongPhiMI->addMachineBasicBlockOperand(PredMBB); + } + } + + // Now that we emitted all of the incoming values for the PHI node, make + // sure to reposition the InsertPoint after the PHI that we just added. + // This is needed because we might have inserted a constant into this + // block, right after the PHI's which is before the old insert point! + PHIInsertPoint = LongPhiMI ? LongPhiMI : PhiMI; + ++PHIInsertPoint; + } + } +} + + +// canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold +// it into the conditional branch or select instruction which is the only user +// of the cc instruction. This is the case if the conditional branch is the +// only user of the setcc, and if the setcc is in the same basic block as the +// conditional branch. We also don't handle long arguments below, so we reject +// them here as well. +// +static SetCondInst *canFoldSetCCIntoBranchOrSelect(Value *V) { + if (SetCondInst *SCI = dyn_cast(V)) + if (SCI->hasOneUse()) { + Instruction *User = cast(SCI->use_back()); + if ((isa(User) || isa(User)) && + SCI->getParent() == User->getParent() && + (getClassB(SCI->getOperand(0)->getType()) != cLong || + SCI->getOpcode() == Instruction::SetEQ || + SCI->getOpcode() == Instruction::SetNE)) + return SCI; + } + return 0; +} + +// Return a fixed numbering for setcc instructions which does not depend on the +// order of the opcodes. +// +static unsigned getSetCCNumber(unsigned Opcode) { + switch(Opcode) { + default: assert(0 && "Unknown setcc instruction!"); + case Instruction::SetEQ: return 0; + case Instruction::SetNE: return 1; + case Instruction::SetLT: return 2; + case Instruction::SetGE: return 3; + case Instruction::SetGT: return 4; + case Instruction::SetLE: return 5; + } +} + +/// emitUCOM - emits an unordered FP compare. +void ISel::emitUCOM(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP, + unsigned LHS, unsigned RHS) { + BuildMI(*MBB, IP, PPC32::FCMPU, 2, PPC32::CR0).addReg(LHS).addReg(RHS); +} + +// EmitComparison - This function emits a comparison of the two operands, +// returning the extended setcc code to use. +unsigned ISel::EmitComparison(unsigned OpNum, Value *Op0, Value *Op1, + MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP) { + // The arguments are already supposed to be of the same type. + const Type *CompTy = Op0->getType(); + unsigned Class = getClassB(CompTy); + unsigned Op0r = getReg(Op0, MBB, IP); + + // Special case handling of: cmp R, i + if (isa(Op1)) { + BuildMI(*MBB, IP, PPC32::CMPI, 2, PPC32::CR0).addReg(Op0r).addImm(0); + } else if (ConstantInt *CI = dyn_cast(Op1)) { + if (Class == cByte || Class == cShort || Class == cInt) { + unsigned Op1v = CI->getRawValue(); + + // Mask off any upper bits of the constant, if there are any... + Op1v &= (1ULL << (8 << Class)) - 1; + + // Compare immediate or promote to reg? + if (Op1v <= 32767) { + BuildMI(*MBB, IP, CompTy->isSigned() ? PPC32::CMPI : PPC32::CMPLI, 3, PPC32::CR0).addImm(0).addReg(Op0r).addImm(Op1v); + } else { + unsigned Op1r = getReg(Op1, MBB, IP); + BuildMI(*MBB, IP, CompTy->isSigned() ? 
PPC32::CMP : PPC32::CMPL, 3, PPC32::CR0).addImm(0).addReg(Op0r).addReg(Op1r); + } + return OpNum; + } else { + assert(Class == cLong && "Unknown integer class!"); + unsigned LowCst = CI->getRawValue(); + unsigned HiCst = CI->getRawValue() >> 32; + if (OpNum < 2) { // seteq, setne + unsigned LoTmp = Op0r; + if (LowCst != 0) { + unsigned LoLow = makeAnotherReg(Type::IntTy); + unsigned LoTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::XORI, 2, LoLow).addReg(Op0r).addImm(LowCst); + BuildMI(*MBB, IP, PPC32::XORIS, 2, LoTmp).addReg(LoLow).addImm(LowCst >> 16); + } + unsigned HiTmp = Op0r+1; + if (HiCst != 0) { + unsigned HiLow = makeAnotherReg(Type::IntTy); + unsigned HiTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::XORI, 2, HiLow).addReg(Op0r+1).addImm(HiCst); + BuildMI(*MBB, IP, PPC32::XORIS, 2, HiTmp).addReg(HiLow).addImm(HiCst >> 16); + } + unsigned FinalTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::ORo, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp); + //BuildMI(*MBB, IP, PPC32::CMPLI, 2, PPC32::CR0).addReg(FinalTmp).addImm(0); + return OpNum; + } else { + // Emit a sequence of code which compares the high and low parts once + // each, then uses a conditional move to handle the overflow case. For + // example, a setlt for long would generate code like this: + // + // AL = lo(op1) < lo(op2) // Always unsigned comparison + // BL = hi(op1) < hi(op2) // Signedness depends on operands + // dest = hi(op1) == hi(op2) ? BL : AL; + // + + // FIXME: Not Yet Implemented + return OpNum; + } + } + } + + unsigned Op1r = getReg(Op1, MBB, IP); + switch (Class) { + default: assert(0 && "Unknown type class!"); + case cByte: + case cShort: + case cInt: + BuildMI(*MBB, IP, CompTy->isSigned() ? PPC32::CMP : PPC32::CMPL, 2, PPC32::CR0).addReg(Op0r).addReg(Op1r); + break; + case cFP: + emitUCOM(MBB, IP, Op0r, Op1r); + break; + + case cLong: + if (OpNum < 2) { // seteq, setne + unsigned LoTmp = makeAnotherReg(Type::IntTy); + unsigned HiTmp = makeAnotherReg(Type::IntTy); + unsigned FinalTmp = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::XOR, 2, LoTmp).addReg(Op0r).addReg(Op1r); + BuildMI(*MBB, IP, PPC32::XOR, 2, HiTmp).addReg(Op0r+1).addReg(Op1r+1); + BuildMI(*MBB, IP, PPC32::ORo, 2, FinalTmp).addReg(LoTmp).addReg(HiTmp); + //BuildMI(*MBB, IP, PPC32::CMPLI, 2, PPC32::CR0).addReg(FinalTmp).addImm(0); + break; // Allow the sete or setne to be generated from flags set by OR + } else { + // Emit a sequence of code which compares the high and low parts once + // each, then uses a conditional move to handle the overflow case. For + // example, a setlt for long would generate code like this: + // + // AL = lo(op1) < lo(op2) // Signedness depends on operands + // BL = hi(op1) < hi(op2) // Always unsigned comparison + // dest = hi(op1) == hi(op2) ? BL : AL; + // + + // FIXME: Not Yet Implemented + return OpNum; + } + } + return OpNum; +} + +/// SetCC instructions - Here we just emit boilerplate code to set a byte-sized +/// register, then move it to wherever the result should be. +/// +void ISel::visitSetCondInst(SetCondInst &I) { + if (canFoldSetCCIntoBranchOrSelect(&I)) + return; // Fold this into a branch or select. + + unsigned DestReg = getReg(I); + MachineBasicBlock::iterator MII = BB->end(); + emitSetCCOperation(BB, MII, I.getOperand(0), I.getOperand(1), I.getOpcode(),DestReg); +} + +/// emitSetCCOperation - Common code shared between visitSetCondInst and +/// constant expression support. +/// +/// FIXME: this is wrong. 
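// Illustrative sketch (not part of this patch): two facts the comparison code above
// leans on.  First, the fixed setcc numbering (eq=0, ne=1, lt=2, ge=3, gt=4, le=5)
// places each condition next to its logical inverse, so inverting a condition number
// is just "N ^ 1".  Second, the 64-bit seteq/setne path works because two longs are
// equal exactly when XORing their halves pairwise and ORing the results yields zero;
// the or-with-record form (ORo) sets CR0 from that result.
static unsigned invertSetCCNumber(unsigned N) { return N ^ 1; }   // eq<->ne, lt<->ge, gt<->le
static bool longsEqual(unsigned LoA, unsigned HiA, unsigned LoB, unsigned HiB) {
  return ((LoA ^ LoB) | (HiA ^ HiB)) == 0;                        // what XOR/XOR/ORo computes
}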
we should figure out a way to guarantee +/// TargetReg is a CR and then make it a no-op +void ISel::emitSetCCOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, unsigned Opcode, + unsigned TargetReg) { + unsigned OpNum = getSetCCNumber(Opcode); + OpNum = EmitComparison(OpNum, Op0, Op1, MBB, IP); + + // The value is already in CR0 at this point, do nothing. +} + + +void ISel::visitSelectInst(SelectInst &SI) { + unsigned DestReg = getReg(SI); + MachineBasicBlock::iterator MII = BB->end(); + emitSelectOperation(BB, MII, SI.getCondition(), SI.getTrueValue(),SI.getFalseValue(), DestReg); +} + +/// emitSelect - Common code shared between visitSelectInst and the constant +/// expression support. +/// FIXME: this is most likely broken in one or more ways. Namely, PowerPC has +/// no select instruction. FSEL only works for comparisons against zero. +void ISel::emitSelectOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Cond, Value *TrueVal, Value *FalseVal, + unsigned DestReg) { + unsigned SelectClass = getClassB(TrueVal->getType()); + + unsigned TrueReg = getReg(TrueVal, MBB, IP); + unsigned FalseReg = getReg(FalseVal, MBB, IP); + + if (TrueReg == FalseReg) { + if (SelectClass == cFP) { + BuildMI(*MBB, IP, PPC32::FMR, 1, DestReg).addReg(TrueReg); + } else { + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(TrueReg).addReg(TrueReg); + } + + if (SelectClass == cLong) + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(TrueReg+1).addReg(TrueReg+1); + return; + } + + unsigned CondReg = getReg(Cond, MBB, IP); + unsigned numZeros = makeAnotherReg(Type::IntTy); + unsigned falseHi = makeAnotherReg(Type::IntTy); + unsigned falseAll = makeAnotherReg(Type::IntTy); + unsigned trueAll = makeAnotherReg(Type::IntTy); + unsigned Temp1 = makeAnotherReg(Type::IntTy); + unsigned Temp2 = makeAnotherReg(Type::IntTy); + + BuildMI(*MBB, IP, PPC32::CNTLZW, 1, numZeros).addReg(CondReg); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, falseHi).addReg(numZeros).addImm(26).addImm(0).addImm(0); + BuildMI(*MBB, IP, PPC32::SRAWI, 2, falseAll).addReg(falseHi).addImm(31); + BuildMI(*MBB, IP, PPC32::NOR, 2, trueAll).addReg(falseAll).addReg(falseAll); + BuildMI(*MBB, IP, PPC32::AND, 2, Temp1).addReg(TrueReg).addReg(trueAll); + BuildMI(*MBB, IP, PPC32::AND, 2, Temp2).addReg(FalseReg).addReg(falseAll); + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(Temp1).addReg(Temp2); + + if (SelectClass == cLong) { + unsigned Temp3 = makeAnotherReg(Type::IntTy); + unsigned Temp4 = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::AND, 2, Temp3).addReg(TrueReg+1).addReg(trueAll); + BuildMI(*MBB, IP, PPC32::AND, 2, Temp4).addReg(FalseReg+1).addReg(falseAll); + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(Temp3).addReg(Temp4); + } + + return; +} + + + +/// promote32 - Emit instructions to turn a narrow operand into a 32-bit-wide +/// operand, in the specified target register. +/// +void ISel::promote32(unsigned targetReg, const ValueRecord &VR) { + bool isUnsigned = VR.Ty->isUnsigned() || VR.Ty == Type::BoolTy; + + Value *Val = VR.Val; + const Type *Ty = VR.Ty; + if (Val) { + if (Constant *C = dyn_cast(Val)) { + Val = ConstantExpr::getCast(C, Type::IntTy); + Ty = Type::IntTy; + } + + // If this is a simple constant, just emit a load directly to avoid the copy. 
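+    // How the constant gets materialized below: a value that fits in a signed
+    // 16-bit immediate is loaded with a single addi against the literal zero
+    // (rA = 0 in addi/addis encodes the value 0, not r0 -- the "li"/"lis"
+    // idioms); anything wider is built as addis of the high 16 bits followed
+    // by ori of the low 16 bits.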
+ if (ConstantInt *CI = dyn_cast(Val)) { + int TheVal = CI->getRawValue() & 0xFFFFFFFF; + + if (TheVal < 32768 && TheVal >= -32768) { + BuildMI(BB, PPC32::ADDI, 2, targetReg).addReg(PPC32::R0).addImm(TheVal); + } else { + unsigned TmpReg = makeAnotherReg(Type::IntTy); + BuildMI(BB, PPC32::ADDIS, 2, TmpReg).addReg(PPC32::R0).addImm(TheVal >> 16); + BuildMI(BB, PPC32::ORI, 2, targetReg).addReg(TmpReg).addImm(TheVal & 0xFFFF); + } + return; + } + } + + // Make sure we have the register number for this value... + unsigned Reg = Val ? getReg(Val) : VR.Reg; + + switch (getClassB(Ty)) { + case cByte: + // Extend value into target register (8->32) + if (isUnsigned) + BuildMI(BB, PPC32::RLWINM, 4, targetReg).addReg(Reg).addZImm(0).addZImm(24).addZImm(31); + else + BuildMI(BB, PPC32::EXTSB, 1, targetReg).addReg(Reg); + break; + case cShort: + // Extend value into target register (16->32) + if (isUnsigned) + BuildMI(BB, PPC32::RLWINM, 4, targetReg).addReg(Reg).addZImm(0).addZImm(16).addZImm(31); + else + BuildMI(BB, PPC32::EXTSH, 1, targetReg).addReg(Reg); + break; + case cInt: + // Move value into target register (32->32) + BuildMI(BB, PPC32::ORI, 2, targetReg).addReg(Reg).addReg(Reg); + break; + default: + assert(0 && "Unpromotable operand class in promote32"); + } +} + +// just emit blr. +void ISel::visitReturnInst(ReturnInst &I) { + Value *RetVal = I.getOperand(0); + + switch (getClassB(RetVal->getType())) { + case cByte: // integral return values: extend or move into r3 and return + case cShort: + case cInt: + promote32(PPC32::R3, ValueRecord(RetVal)); + break; + case cFP: { // Floats & Doubles: Return in f1 + unsigned RetReg = getReg(RetVal); + BuildMI(BB, PPC32::FMR, 1, PPC32::F1).addReg(RetReg); + break; + } + case cLong: { + unsigned RetReg = getReg(RetVal); + BuildMI(BB, PPC32::OR, 2, PPC32::R3).addReg(RetReg).addReg(RetReg); + BuildMI(BB, PPC32::OR, 2, PPC32::R4).addReg(RetReg+1).addReg(RetReg+1); + break; + } + default: + visitInstruction(I); + } + BuildMI(BB, PPC32::BLR, 1).addImm(0); +} + +// getBlockAfter - Return the basic block which occurs lexically after the +// specified one. +static inline BasicBlock *getBlockAfter(BasicBlock *BB) { + Function::iterator I = BB; ++I; // Get iterator to next block + return I != BB->getParent()->end() ? &*I : 0; +} + +/// visitBranchInst - Handle conditional and unconditional branches here. Note +/// that since code layout is frozen at this point, that if we are trying to +/// jump to a block that is the immediate successor of the current block, we can +/// just make a fall-through (but we don't currently). +/// +void ISel::visitBranchInst(BranchInst &BI) { + // Update machine-CFG edges + BB->addSuccessor (MBBMap[BI.getSuccessor(0)]); + if (BI.isConditional()) + BB->addSuccessor (MBBMap[BI.getSuccessor(1)]); + + BasicBlock *NextBB = getBlockAfter(BI.getParent()); // BB after current one + + if (!BI.isConditional()) { // Unconditional branch? + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, PPC32::B, 1).addMBB(MBBMap[BI.getSuccessor(0)]); + return; + } + + // See if we can fold the setcc into the branch itself... + SetCondInst *SCI = canFoldSetCCIntoBranchOrSelect(BI.getCondition()); + if (SCI == 0) { + // Nope, cannot fold setcc into this branch. Emit a branch on a condition + // computed some other way... 
+ unsigned condReg = getReg(BI.getCondition()); + BuildMI(BB, PPC32::CMPLI, 3, PPC32::CR0).addImm(0).addReg(condReg).addImm(0); + if (BI.getSuccessor(1) == NextBB) { + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, PPC32::BC, 3).addImm(4).addImm(2).addMBB(MBBMap[BI.getSuccessor(0)]); + } else { + BuildMI(BB, PPC32::BC, 3).addImm(12).addImm(2).addMBB(MBBMap[BI.getSuccessor(1)]); + + if (BI.getSuccessor(0) != NextBB) + BuildMI(BB, PPC32::B, 1).addMBB(MBBMap[BI.getSuccessor(0)]); + } + return; + } + + + unsigned OpNum = getSetCCNumber(SCI->getOpcode()); + MachineBasicBlock::iterator MII = BB->end(); + OpNum = EmitComparison(OpNum, SCI->getOperand(0), SCI->getOperand(1), BB,MII); + + const Type *CompTy = SCI->getOperand(0)->getType(); + bool isSigned = CompTy->isSigned() && getClassB(CompTy) != cFP; + + // LLVM -> X86 signed X86 unsigned + // ----- ---------- ------------ + // seteq -> je je + // setne -> jne jne + // setlt -> jl jb + // setge -> jge jae + // setgt -> jg ja + // setle -> jle jbe + + static const unsigned BITab[6] = { 2, 2, 0, 0, 1, 1 }; + unsigned BO_true = (OpNum % 2 == 0) ? 12 : 4; + unsigned BO_false = (OpNum % 2 == 0) ? 4 : 12; + unsigned BIval = BITab[0]; + + if (BI.getSuccessor(0) != NextBB) { + BuildMI(BB, PPC32::BC, 3).addImm(BO_true).addImm(BIval).addMBB(MBBMap[BI.getSuccessor(0)]); + if (BI.getSuccessor(1) != NextBB) + BuildMI(BB, PPC32::B, 1).addMBB(MBBMap[BI.getSuccessor(1)]); + } else { + // Change to the inverse condition... + if (BI.getSuccessor(1) != NextBB) { + BuildMI(BB, PPC32::BC, 3).addImm(BO_false).addImm(BIval).addMBB(MBBMap[BI.getSuccessor(1)]); + } + } +} + + +/// doCall - This emits an abstract call instruction, setting up the arguments +/// and the return value as appropriate. For the actual function call itself, +/// it inserts the specified CallMI instruction into the stream. +/// +/// FIXME: See Documentation at the following URL for "correct" behavior +/// +void ISel::doCall(const ValueRecord &Ret, MachineInstr *CallMI, + const std::vector &Args) { + // Count how many bytes are to be pushed on the stack... + unsigned NumBytes = 0; + + if (!Args.empty()) { + for (unsigned i = 0, e = Args.size(); i != e; ++i) + switch (getClassB(Args[i].Ty)) { + case cByte: case cShort: case cInt: + NumBytes += 4; break; + case cLong: + NumBytes += 8; break; + case cFP: + NumBytes += Args[i].Ty == Type::FloatTy ? 4 : 8; + break; + default: assert(0 && "Unknown class!"); + } + + // Adjust the stack pointer for the new arguments... + BuildMI(BB, PPC32::ADJCALLSTACKDOWN, 1).addImm(NumBytes); + + // Arguments go on the stack in reverse order, as specified by the ABI. + unsigned ArgOffset = 0; + unsigned GPR_remaining = 8; + unsigned FPR_remaining = 13; + unsigned GPR_idx = 3; + unsigned FPR_idx = 1; + + for (unsigned i = 0, e = Args.size(); i != e; ++i) { + unsigned ArgReg; + switch (getClassB(Args[i].Ty)) { + case cByte: + case cShort: + // Promote arg to 32 bits wide into a temporary register... + ArgReg = makeAnotherReg(Type::UIntTy); + promote32(ArgReg, Args[i]); + + // Reg or stack? + if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, PPC32::R0 + GPR_idx).addReg(ArgReg).addReg(ArgReg); + } else { + BuildMI(BB, PPC32::STW, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + } + break; + case cInt: + ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg; + + // Reg or stack? 
+ if (GPR_remaining > 0) { + BuildMI(BB, PPC32::OR, 2, PPC32::R0 + GPR_idx).addReg(ArgReg).addReg(ArgReg); + } else { + BuildMI(BB, PPC32::STW, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + } + break; + case cLong: + ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg; + + // Reg or stack? + if (GPR_remaining > 1) { + BuildMI(BB, PPC32::OR, 2, PPC32::R0 + GPR_idx).addReg(ArgReg).addReg(ArgReg); + BuildMI(BB, PPC32::OR, 2, PPC32::R0 + GPR_idx + 1).addReg(ArgReg+1).addReg(ArgReg+1); + } else { + BuildMI(BB, PPC32::STW, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + BuildMI(BB, PPC32::STW, 3).addReg(ArgReg+1).addImm(ArgOffset+4).addReg(PPC32::R1); + } + + ArgOffset += 4; // 8 byte entry, not 4. + if (GPR_remaining > 0) { + GPR_remaining -= 1; // uses up 2 GPRs + GPR_idx += 1; + } + break; + case cFP: + ArgReg = Args[i].Val ? getReg(Args[i].Val) : Args[i].Reg; + if (Args[i].Ty == Type::FloatTy) { + // Reg or stack? + if (FPR_remaining > 0) { + BuildMI(BB, PPC32::FMR, 1, PPC32::F0 + FPR_idx).addReg(ArgReg); + FPR_remaining--; + FPR_idx++; + } else { + BuildMI(BB, PPC32::STFS, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + } + } else { + assert(Args[i].Ty == Type::DoubleTy && "Unknown FP type!"); + // Reg or stack? + if (FPR_remaining > 0) { + BuildMI(BB, PPC32::FMR, 1, PPC32::F0 + FPR_idx).addReg(ArgReg); + FPR_remaining--; + FPR_idx++; + } else { + BuildMI(BB, PPC32::STFD, 3).addReg(ArgReg).addImm(ArgOffset).addReg(PPC32::R1); + } + + ArgOffset += 4; // 8 byte entry, not 4. + if (GPR_remaining > 0) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + } + break; + + default: assert(0 && "Unknown class!"); + } + ArgOffset += 4; + if (GPR_remaining > 0) { + GPR_remaining--; // uses up 2 GPRs + GPR_idx++; + } + } + } else { + BuildMI(BB, PPC32::ADJCALLSTACKDOWN, 1).addImm(0); + } + + BB->push_back(CallMI); + + BuildMI(BB, PPC32::ADJCALLSTACKUP, 1).addImm(NumBytes); + + // If there is a return value, scavenge the result from the location the call + // leaves it in... + // + if (Ret.Ty != Type::VoidTy) { + unsigned DestClass = getClassB(Ret.Ty); + switch (DestClass) { + case cByte: + case cShort: + case cInt: + // Integral results are in r3 + BuildMI(BB, PPC32::OR, 2, Ret.Reg).addReg(PPC32::R3).addReg(PPC32::R3); + case cFP: // Floating-point return values live in f1 + BuildMI(BB, PPC32::FMR, 1, Ret.Reg).addReg(PPC32::F1); + break; + case cLong: // Long values are in r3:r4 + BuildMI(BB, PPC32::OR, 2, Ret.Reg).addReg(PPC32::R3).addReg(PPC32::R3); + BuildMI(BB, PPC32::OR, 2, Ret.Reg+1).addReg(PPC32::R4).addReg(PPC32::R4); + break; + default: assert(0 && "Unknown class!"); + } + } +} + + +/// visitCallInst - Push args on stack and do a procedure call instruction. +void ISel::visitCallInst(CallInst &CI) { + MachineInstr *TheCall; + if (Function *F = CI.getCalledFunction()) { + // Is it an intrinsic function call? + if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID()) { + visitIntrinsicCall(ID, CI); // Special intrinsics are not handled here + return; + } + + // Emit a CALL instruction with PC-relative displacement. 
+ TheCall = BuildMI(PPC32::CALLpcrel, 1).addGlobalAddress(F, true); + } else { // Emit an indirect call through the CTR + unsigned Reg = getReg(CI.getCalledValue()); + BuildMI(PPC32::MTSPR, 2).addZImm(9).addReg(Reg); + TheCall = BuildMI(PPC32::CALLindirect, 1).addZImm(20).addZImm(0); + } + + std::vector Args; + for (unsigned i = 1, e = CI.getNumOperands(); i != e; ++i) + Args.push_back(ValueRecord(CI.getOperand(i))); + + unsigned DestReg = CI.getType() != Type::VoidTy ? getReg(CI) : 0; + doCall(ValueRecord(DestReg, CI.getType()), TheCall, Args); +} + + +/// dyncastIsNan - Return the operand of an isnan operation if this is an isnan. +/// +static Value *dyncastIsNan(Value *V) { + if (CallInst *CI = dyn_cast(V)) + if (Function *F = CI->getCalledFunction()) + if (F->getIntrinsicID() == Intrinsic::isnan) + return CI->getOperand(1); + return 0; +} + +/// isOnlyUsedByUnorderedComparisons - Return true if this value is only used by +/// or's whos operands are all calls to the isnan predicate. +static bool isOnlyUsedByUnorderedComparisons(Value *V) { + assert(dyncastIsNan(V) && "The value isn't an isnan call!"); + + // Check all uses, which will be or's of isnans if this predicate is true. + for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){ + Instruction *I = cast(*UI); + if (I->getOpcode() != Instruction::Or) return false; + if (I->getOperand(0) != V && !dyncastIsNan(I->getOperand(0))) return false; + if (I->getOperand(1) != V && !dyncastIsNan(I->getOperand(1))) return false; + } + + return true; +} + +/// LowerUnknownIntrinsicFunctionCalls - This performs a prepass over the +/// function, lowering any calls to unknown intrinsic functions into the +/// equivalent LLVM code. +/// +void ISel::LowerUnknownIntrinsicFunctionCalls(Function &F) { + for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) + for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) + if (CallInst *CI = dyn_cast(I++)) + if (Function *F = CI->getCalledFunction()) + switch (F->getIntrinsicID()) { + case Intrinsic::not_intrinsic: + case Intrinsic::vastart: + case Intrinsic::vacopy: + case Intrinsic::vaend: + case Intrinsic::returnaddress: + case Intrinsic::frameaddress: + case Intrinsic::isnan: + // We directly implement these intrinsics + break; + case Intrinsic::readio: { + // On PPC, memory operations are in-order. Lower this intrinsic + // into a volatile load. + Instruction *Before = CI->getPrev(); + LoadInst * LI = new LoadInst(CI->getOperand(1), "", true, CI); + CI->replaceAllUsesWith(LI); + BB->getInstList().erase(CI); + break; + } + case Intrinsic::writeio: { + // On PPC, memory operations are in-order. Lower this intrinsic + // into a volatile store. + Instruction *Before = CI->getPrev(); + StoreInst *LI = new StoreInst(CI->getOperand(1), + CI->getOperand(2), true, CI); + CI->replaceAllUsesWith(LI); + BB->getInstList().erase(CI); + break; + } + default: + // All other intrinsic calls we must lower. + Instruction *Before = CI->getPrev(); + TM.getIntrinsicLowering().LowerIntrinsicCall(CI); + if (Before) { // Move iterator to instruction after call + I = Before; ++I; + } else { + I = BB->begin(); + } + } +} + +void ISel::visitIntrinsicCall(Intrinsic::ID ID, CallInst &CI) { + unsigned TmpReg1, TmpReg2, TmpReg3; + switch (ID) { + case Intrinsic::vastart: + // Get the address of the first vararg value... 
+ TmpReg1 = getReg(CI); + addFrameReference(BuildMI(BB, PPC32::ADDI, 2, TmpReg1), VarArgsFrameIndex); + return; + + case Intrinsic::vacopy: + TmpReg1 = getReg(CI); + TmpReg2 = getReg(CI.getOperand(1)); + BuildMI(BB, PPC32::OR, 2, TmpReg1).addReg(TmpReg2).addReg(TmpReg2); + return; + case Intrinsic::vaend: return; + + case Intrinsic::returnaddress: + case Intrinsic::frameaddress: + TmpReg1 = getReg(CI); + if (cast(CI.getOperand(1))->isNullValue()) { + if (ID == Intrinsic::returnaddress) { + // Just load the return address + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, TmpReg1), + ReturnAddressIndex); + } else { + addFrameReference(BuildMI(BB, PPC32::ADDI, 2, TmpReg1), + ReturnAddressIndex, -4, false); + } + } else { + // Values other than zero are not implemented yet. + BuildMI(BB, PPC32::ADDI, 2, TmpReg1).addReg(PPC32::R0).addImm(0); + } + return; + + case Intrinsic::isnan: + // If this is only used by 'isunordered' style comparisons, don't emit it. + if (isOnlyUsedByUnorderedComparisons(&CI)) return; + TmpReg1 = getReg(CI.getOperand(1)); + emitUCOM(BB, BB->end(), TmpReg1, TmpReg1); + TmpReg2 = makeAnotherReg(Type::IntTy); + BuildMI(BB, PPC32::MFCR, TmpReg2); + TmpReg3 = getReg(CI); + BuildMI(BB, PPC32::RLWINM, 4, TmpReg3).addReg(TmpReg2).addImm(4).addImm(31).addImm(31); + return; + + default: assert(0 && "Error: unknown intrinsics should have been lowered!"); + } +} + +/// visitSimpleBinary - Implement simple binary operators for integral types... +/// OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for Or, 4 for +/// Xor. +/// +void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) { + unsigned DestReg = getReg(B); + MachineBasicBlock::iterator MI = BB->end(); + Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1); + unsigned Class = getClassB(B.getType()); + + emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg); +} + +/// emitBinaryFPOperation - This method handles emission of floating point +/// Add (0), Sub (1), Mul (2), and Div (3) operations. +void ISel::emitBinaryFPOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, + unsigned OperatorClass, unsigned DestReg) { + + // Special case: op Reg, + if (ConstantFP *Op1C = dyn_cast(Op1)) { + // Create a constant pool entry for this constant. + MachineConstantPool *CP = F->getConstantPool(); + unsigned CPI = CP->getConstantPoolIndex(Op1C); + const Type *Ty = Op1->getType(); + + static const unsigned OpcodeTab[][4] = { + { PPC32::FADDS, PPC32::FSUBS, PPC32::FMULS, PPC32::FDIVS }, // Float + { PPC32::FADD, PPC32::FSUB, PPC32::FMUL, PPC32::FDIV }, // Double + }; + + assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!"); + unsigned TempReg = makeAnotherReg(Ty); + unsigned LoadOpcode = Ty == Type::FloatTy ? PPC32::LFS : PPC32::LFD; + addConstantPoolReference(BuildMI(*BB, IP, LoadOpcode, 2, TempReg), CPI); + + unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass]; + unsigned Op0r = getReg(Op0, BB, IP); + BuildMI(*BB, IP, Opcode, DestReg).addReg(Op0r).addReg(TempReg); + return; + } + + // Special case: R1 = op , R2 + if (ConstantFP *CFP = dyn_cast(Op0)) + if (CFP->isExactlyValue(-0.0) && OperatorClass == 1) { + // -0.0 - X === -X + unsigned op1Reg = getReg(Op1, BB, IP); + BuildMI(*BB, IP, PPC32::FNEG, 1, DestReg).addReg(op1Reg); + return; + } else { + // R1 = op CST, R2 --> R1 = opr R2, CST + + // Create a constant pool entry for this constant. 
+ MachineConstantPool *CP = F->getConstantPool(); + unsigned CPI = CP->getConstantPoolIndex(CFP); + const Type *Ty = CFP->getType(); + + static const unsigned OpcodeTab[][4] = { + { PPC32::FADDS, PPC32::FSUBS, PPC32::FMULS, PPC32::FDIVS }, // Float + { PPC32::FADD, PPC32::FSUB, PPC32::FMUL, PPC32::FDIV }, // Double + }; + + assert(Ty == Type::FloatTy || Ty == Type::DoubleTy && "Unknown FP type!"); + unsigned TempReg = makeAnotherReg(Ty); + unsigned LoadOpcode = Ty == Type::FloatTy ? PPC32::LFS : PPC32::LFD; + addConstantPoolReference(BuildMI(*BB, IP, LoadOpcode, 2, TempReg), CPI); + + unsigned Opcode = OpcodeTab[Ty != Type::FloatTy][OperatorClass]; + unsigned Op1r = getReg(Op1, BB, IP); + BuildMI(*BB, IP, Opcode, DestReg).addReg(TempReg).addReg(Op1r); + return; + } + + // General case. + static const unsigned OpcodeTab[4] = { + PPC32::FADD, PPC32::FSUB, PPC32::FMUL, PPC32::FDIV + }; + + unsigned Opcode = OpcodeTab[OperatorClass]; + unsigned Op0r = getReg(Op0, BB, IP); + unsigned Op1r = getReg(Op1, BB, IP); + BuildMI(*BB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r); +} + +/// emitSimpleBinaryOperation - Implement simple binary operators for integral +/// types... OperatorClass is one of: 0 for Add, 1 for Sub, 2 for And, 3 for +/// Or, 4 for Xor. +/// +/// emitSimpleBinaryOperation - Common code shared between visitSimpleBinary +/// and constant expression support. +/// +void ISel::emitSimpleBinaryOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, + unsigned OperatorClass, unsigned DestReg) { + unsigned Class = getClassB(Op0->getType()); + + // Arithmetic and Bitwise operators + static const unsigned OpcodeTab[5] = { + PPC32::ADD, PPC32::SUB, PPC32::AND, PPC32::OR, PPC32::XOR + }; + // Otherwise, code generate the full operation with a constant. + static const unsigned BottomTab[] = { + PPC32::ADDC, PPC32::SUBC, PPC32::AND, PPC32::OR, PPC32::XOR + }; + static const unsigned TopTab[] = { + PPC32::ADDE, PPC32::SUBFE, PPC32::AND, PPC32::OR, PPC32::XOR + }; + + if (Class == cFP) { + assert(OperatorClass < 2 && "No logical ops for FP!"); + emitBinaryFPOperation(MBB, IP, Op0, Op1, OperatorClass, DestReg); + return; + } + + if (Op0->getType() == Type::BoolTy) { + if (OperatorClass == 3) + // If this is an or of two isnan's, emit an FP comparison directly instead + // of or'ing two isnan's together. 
+ if (Value *LHS = dyncastIsNan(Op0)) + if (Value *RHS = dyncastIsNan(Op1)) { + unsigned Op0Reg = getReg(RHS, MBB, IP), Op1Reg = getReg(LHS, MBB, IP); + unsigned TmpReg = makeAnotherReg(Type::IntTy); + emitUCOM(MBB, IP, Op0Reg, Op1Reg); + BuildMI(*MBB, IP, PPC32::MFCR, TmpReg); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(TmpReg).addImm(4).addImm(31).addImm(31); + return; + } + } + + // sub 0, X -> neg X + if (ConstantInt *CI = dyn_cast(Op0)) + if (OperatorClass == 1 && CI->isNullValue()) { + unsigned op1Reg = getReg(Op1, MBB, IP); + BuildMI(*MBB, IP, PPC32::NEG, 1, DestReg).addReg(op1Reg); + + if (Class == cLong) { + unsigned zeroes = makeAnotherReg(Type::IntTy); + unsigned overflow = makeAnotherReg(Type::IntTy); + unsigned T = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::CNTLZW, 1, zeroes).addReg(op1Reg); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, overflow).addReg(zeroes).addImm(27).addImm(5).addImm(31); + BuildMI(*MBB, IP, PPC32::ADD, 2, T).addReg(op1Reg+1).addReg(overflow); + BuildMI(*MBB, IP, PPC32::NEG, 1, DestReg+1).addReg(T); + } + return; + } + + // Special case: op Reg, + if (ConstantInt *Op1C = dyn_cast(Op1)) { + unsigned Op0r = getReg(Op0, MBB, IP); + + // xor X, -1 -> not X + if (OperatorClass == 4 && Op1C->isAllOnesValue()) { + BuildMI(*MBB, IP, PPC32::NOR, 2, DestReg).addReg(Op0r).addReg(Op0r); + if (Class == cLong) // Invert the top part too + BuildMI(*MBB, IP, PPC32::NOR, 2, DestReg+1).addReg(Op0r+1).addReg(Op0r+1); + return; + } + + unsigned Opcode = OpcodeTab[OperatorClass]; + unsigned Op1r = getReg(Op1, MBB, IP); + + if (Class != cLong) { + BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r); + return; + } + + // If the constant is zero in the low 32-bits, just copy the low part + // across and apply the normal 32-bit operation to the high parts. There + // will be no carry or borrow into the top. + if (cast(Op1C)->getRawValue() == 0) { + if (OperatorClass != 2) // All but and... + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(Op0r).addReg(Op0r); + else + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg).addReg(PPC32::R0).addImm(0); + BuildMI(*MBB, IP, Opcode, 2, DestReg+1).addReg(Op0r+1).addReg(Op1r+1); + return; + } + + // If this is a long value and the high or low bits have a special + // property, emit some special cases. + unsigned Op1h = cast(Op1C)->getRawValue() >> 32LL; + + // If this is a logical operation and the top 32-bits are zero, just + // operate on the lower 32. + if (Op1h == 0 && OperatorClass > 1) { + BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r); + if (OperatorClass != 2) // All but and + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(Op0r+1).addReg(Op0r+1); + else + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg+1).addReg(PPC32::R0).addImm(0); + return; + } + + // TODO: We could handle lots of other special cases here, such as AND'ing + // with 0xFFFFFFFF00000000 -> noop, etc. 
+ + BuildMI(*MBB, IP, BottomTab[OperatorClass], 2, DestReg).addReg(Op0r).addImm(Op1r); + BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1).addReg(Op0r+1).addImm(Op1r+1); + return; + } + + unsigned Op0r = getReg(Op0, MBB, IP); + unsigned Op1r = getReg(Op1, MBB, IP); + + if (Class != cLong) { + unsigned Opcode = OpcodeTab[OperatorClass]; + BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r); + } else { + BuildMI(*MBB, IP, BottomTab[OperatorClass], 2, DestReg).addReg(Op0r).addImm(Op1r); + BuildMI(*MBB, IP, TopTab[OperatorClass], 2, DestReg+1).addReg(Op0r+1).addImm(Op1r+1); + } + return; +} + +/// doMultiply - Emit appropriate instructions to multiply together the +/// registers op0Reg and op1Reg, and put the result in DestReg. The type of the +/// result should be given as DestTy. +/// +void ISel::doMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator MBBI, + unsigned DestReg, const Type *DestTy, + unsigned op0Reg, unsigned op1Reg) { + unsigned Class = getClass(DestTy); + switch (Class) { + case cLong: + BuildMI(*MBB, MBBI, PPC32::MULHW, 2, DestReg+1).addReg(op0Reg+1).addReg(op1Reg+1); + case cInt: + case cShort: + case cByte: + BuildMI(*MBB, MBBI, PPC32::MULLW, 2, DestReg).addReg(op0Reg).addReg(op1Reg); + return; + default: + assert(0 && "doMultiply cannot operate on unknown type!"); + } +} + +// ExactLog2 - This function solves for (Val == 1 << (N-1)) and returns N. It +// returns zero when the input is not exactly a power of two. +static unsigned ExactLog2(unsigned Val) { + if (Val == 0 || (Val & (Val-1))) return 0; + unsigned Count = 0; + while (Val != 1) { + Val >>= 1; + ++Count; + } + return Count+1; +} + + +/// doMultiplyConst - This function is specialized to efficiently codegen an 8, +/// 16, or 32-bit integer multiply by a constant. +void ISel::doMultiplyConst(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + unsigned DestReg, const Type *DestTy, + unsigned op0Reg, unsigned ConstRHS) { + unsigned Class = getClass(DestTy); + // Handle special cases here. + switch (ConstRHS) { + case 0: + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg).addReg(PPC32::R0).addImm(0); + return; + case 1: + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(op0Reg).addReg(op0Reg); + return; + case 2: + BuildMI(*MBB, IP, PPC32::ADD, 2,DestReg).addReg(op0Reg).addReg(op0Reg); + return; + } + + // If the element size is exactly a power of 2, use a shift to get it. + if (unsigned Shift = ExactLog2(ConstRHS)) { + switch (Class) { + default: assert(0 && "Unknown class for this function!"); + case cByte: + case cShort: + case cInt: + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(op0Reg).addImm(Shift-1).addImm(0).addImm(31-Shift-1); + return; + } + } + + // Most general case, emit a normal multiply... + unsigned TmpReg1 = makeAnotherReg(Type::IntTy); + unsigned TmpReg2 = makeAnotherReg(Type::IntTy); + BuildMI(*MBB, IP, PPC32::ADDIS, 2, TmpReg1).addReg(PPC32::R0).addImm(ConstRHS >> 16); + BuildMI(*MBB, IP, PPC32::ORI, 2, TmpReg2).addReg(TmpReg1).addImm(ConstRHS); + + // Emit a MUL to multiply the register holding the index by + // elementSize, putting the result in OffsetReg. 
+ doMultiply(MBB, IP, DestReg, DestTy, op0Reg, TmpReg2); +} + +void ISel::visitMul(BinaryOperator &I) { + unsigned ResultReg = getReg(I); + + Value *Op0 = I.getOperand(0); + Value *Op1 = I.getOperand(1); + + MachineBasicBlock::iterator IP = BB->end(); + emitMultiply(BB, IP, Op0, Op1, ResultReg); +} + +void ISel::emitMultiply(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, unsigned DestReg) { + MachineBasicBlock &BB = *MBB; + TypeClass Class = getClass(Op0->getType()); + + // Simple scalar multiply? + unsigned Op0Reg = getReg(Op0, &BB, IP); + switch (Class) { + case cByte: + case cShort: + case cInt: + if (ConstantInt *CI = dyn_cast(Op1)) { + unsigned Val = (unsigned)CI->getRawValue(); // Isn't a 64-bit constant + doMultiplyConst(&BB, IP, DestReg, Op0->getType(), Op0Reg, Val); + } else { + unsigned Op1Reg = getReg(Op1, &BB, IP); + doMultiply(&BB, IP, DestReg, Op1->getType(), Op0Reg, Op1Reg); + } + return; + case cFP: + emitBinaryFPOperation(MBB, IP, Op0, Op1, 2, DestReg); + return; + case cLong: + break; + } + + // Long value. We have to do things the hard way... + if (ConstantInt *CI = dyn_cast(Op1)) { + unsigned CLow = CI->getRawValue(); + unsigned CHi = CI->getRawValue() >> 32; + + if (CLow == 0) { + // If the low part of the constant is all zeros, things are simple. + BuildMI(BB, IP, PPC32::ADDI, 2, DestReg).addReg(PPC32::R0).addImm(0); + doMultiplyConst(&BB, IP, DestReg+1, Type::UIntTy, Op0Reg, CHi); + return; + } + + // Multiply the two low parts + unsigned OverflowReg = 0; + if (CLow == 1) { + BuildMI(BB, IP, PPC32::OR, 2, DestReg).addReg(Op0Reg).addReg(Op0Reg); + } else { + unsigned TmpRegL = makeAnotherReg(Type::UIntTy); + unsigned Op1RegL = makeAnotherReg(Type::UIntTy); + OverflowReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, IP, PPC32::ADDIS, 2, TmpRegL).addReg(PPC32::R0).addImm(CLow >> 16); + BuildMI(BB, IP, PPC32::ORI, 2, Op1RegL).addReg(TmpRegL).addImm(CLow); + BuildMI(BB, IP, PPC32::MULLW, 2, DestReg).addReg(Op0Reg).addReg(Op1RegL); + BuildMI(BB, IP, PPC32::MULHW, 2, OverflowReg).addReg(Op0Reg).addReg(Op1RegL); + } + + unsigned AHBLReg = makeAnotherReg(Type::UIntTy); + doMultiplyConst(&BB, IP, AHBLReg, Type::UIntTy, Op0Reg+1, CLow); + + unsigned AHBLplusOverflowReg; + if (OverflowReg) { + AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, IP, PPC32::ADD, 2, // AH*BL+(AL*BL >> 32) + AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg); + } else { + AHBLplusOverflowReg = AHBLReg; + } + + if (CHi == 0) { + BuildMI(BB, IP, PPC32::OR, 2, DestReg+1).addReg(AHBLplusOverflowReg).addReg(AHBLplusOverflowReg); + } else { + unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH + doMultiplyConst(&BB, IP, ALBHReg, Type::UIntTy, Op0Reg, CHi); + + BuildMI(BB, IP, PPC32::ADD, 2, // AL*BH + AH*BL + (AL*BL >> 32) + DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg); + } + return; + } + + // General 64x64 multiply + + unsigned Op1Reg = getReg(Op1, &BB, IP); + + // Multiply the two low parts... 
capturing carry into EDX + BuildMI(BB, IP, PPC32::MULLW, 2, DestReg).addReg(Op0Reg).addReg(Op1Reg); // AL*BL + + unsigned OverflowReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, IP, PPC32::MULHW, 2, OverflowReg).addReg(Op0Reg).addReg(Op1Reg); // AL*BL >> 32 + + unsigned AHBLReg = makeAnotherReg(Type::UIntTy); // AH*BL + BuildMI(BB, IP, PPC32::MULLW, 2, AHBLReg).addReg(Op0Reg+1).addReg(Op1Reg); + + unsigned AHBLplusOverflowReg = makeAnotherReg(Type::UIntTy); + BuildMI(BB, IP, PPC32::ADD, 2, // AH*BL+(AL*BL >> 32) + AHBLplusOverflowReg).addReg(AHBLReg).addReg(OverflowReg); + + unsigned ALBHReg = makeAnotherReg(Type::UIntTy); // AL*BH + BuildMI(BB, IP, PPC32::MULLW, 2, ALBHReg).addReg(Op0Reg).addReg(Op1Reg+1); + + BuildMI(BB, IP, PPC32::ADD, 2, // AL*BH + AH*BL + (AL*BL >> 32) + DestReg+1).addReg(AHBLplusOverflowReg).addReg(ALBHReg); +} + + +/// visitDivRem - Handle division and remainder instructions... these +/// instruction both require the same instructions to be generated, they just +/// select the result from a different register. Note that both of these +/// instructions work differently for signed and unsigned operands. +/// +void ISel::visitDivRem(BinaryOperator &I) { + unsigned ResultReg = getReg(I); + Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); + + MachineBasicBlock::iterator IP = BB->end(); + emitDivRemOperation(BB, IP, Op0, Op1, I.getOpcode() == Instruction::Div, ResultReg); +} + +void ISel::emitDivRemOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Op0, Value *Op1, bool isDiv, + unsigned ResultReg) { + const Type *Ty = Op0->getType(); + unsigned Class = getClass(Ty); + switch (Class) { + case cFP: // Floating point divide + if (isDiv) { + emitBinaryFPOperation(BB, IP, Op0, Op1, 3, ResultReg); + return; + } else { // Floating point remainder... + unsigned Op0Reg = getReg(Op0, BB, IP); + unsigned Op1Reg = getReg(Op1, BB, IP); + MachineInstr *TheCall = + BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("fmod", true); + std::vector Args; + Args.push_back(ValueRecord(Op0Reg, Type::DoubleTy)); + Args.push_back(ValueRecord(Op1Reg, Type::DoubleTy)); + doCall(ValueRecord(ResultReg, Type::DoubleTy), TheCall, Args); + } + return; + case cLong: { + static const char *FnName[] = + { "__moddi3", "__divdi3", "__umoddi3", "__udivdi3" }; + unsigned Op0Reg = getReg(Op0, BB, IP); + unsigned Op1Reg = getReg(Op1, BB, IP); + unsigned NameIdx = Ty->isUnsigned()*2 + isDiv; + MachineInstr *TheCall = + BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol(FnName[NameIdx], true); + + std::vector Args; + Args.push_back(ValueRecord(Op0Reg, Type::LongTy)); + Args.push_back(ValueRecord(Op1Reg, Type::LongTy)); + doCall(ValueRecord(ResultReg, Type::LongTy), TheCall, Args); + return; + } + case cByte: case cShort: case cInt: + break; // Small integrals, handled below... + default: assert(0 && "Unknown class!"); + } + + // Special case signed division by power of 2. + if (isDiv) + if (ConstantSInt *CI = dyn_cast(Op1)) { + assert(Class != cLong && "This doesn't handle 64-bit divides!"); + int V = CI->getValue(); + + if (V == 1) { // X /s 1 => X + unsigned Op0Reg = getReg(Op0, BB, IP); + BuildMI(*BB, IP, PPC32::OR, 2, ResultReg).addReg(Op0Reg).addReg(Op0Reg); + return; + } + + if (V == -1) { // X /s -1 => -X + unsigned Op0Reg = getReg(Op0, BB, IP); + BuildMI(*BB, IP, PPC32::NEG, 1, ResultReg).addReg(Op0Reg); + return; + } + + bool isNeg = false; + if (V < 0) { // Not a positive power of 2? + V = -V; + isNeg = true; // Maybe it's a negative power of 2. 
+ } + if (unsigned Log = ExactLog2(V)) { + --Log; + unsigned Op0Reg = getReg(Op0, BB, IP); + unsigned TmpReg = makeAnotherReg(Op0->getType()); + if (Log != 1) + BuildMI(*BB, IP, PPC32::SRAWI, 2, TmpReg).addReg(Op0Reg).addImm(Log-1); + else + BuildMI(*BB, IP, PPC32::OR, 2, TmpReg).addReg(Op0Reg).addReg(Op0Reg); + + unsigned TmpReg2 = makeAnotherReg(Op0->getType()); + BuildMI(*BB, IP, PPC32::RLWINM, 4, TmpReg2).addReg(TmpReg).addImm(Log).addImm(32-Log).addImm(31); + + unsigned TmpReg3 = makeAnotherReg(Op0->getType()); + BuildMI(*BB, IP, PPC32::ADD, 2, TmpReg3).addReg(Op0Reg).addReg(TmpReg2); + + unsigned TmpReg4 = isNeg ? makeAnotherReg(Op0->getType()) : ResultReg; + BuildMI(*BB, IP, PPC32::SRAWI, 2, TmpReg4).addReg(Op0Reg).addImm(Log); + + if (isNeg) + BuildMI(*BB, IP, PPC32::NEG, 1, ResultReg).addReg(TmpReg4); + return; + } + } + + unsigned Op0Reg = getReg(Op0, BB, IP); + unsigned Op1Reg = getReg(Op1, BB, IP); + + if (isDiv) { + if (Ty->isSigned()) { + BuildMI(*BB, IP, PPC32::DIVW, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg); + } else { + BuildMI(*BB, IP, PPC32::DIVWU, 2, ResultReg).addReg(Op0Reg).addReg(Op1Reg); + } + } else { // Remainder + unsigned TmpReg1 = makeAnotherReg(Op0->getType()); + unsigned TmpReg2 = makeAnotherReg(Op0->getType()); + + if (Ty->isSigned()) { + BuildMI(*BB, IP, PPC32::DIVW, 2, TmpReg1).addReg(Op0Reg).addReg(Op1Reg); + } else { + BuildMI(*BB, IP, PPC32::DIVWU, 2, TmpReg1).addReg(Op0Reg).addReg(Op1Reg); + } + BuildMI(*BB, IP, PPC32::MULLW, 2, TmpReg2).addReg(TmpReg1).addReg(Op1Reg); + BuildMI(*BB, IP, PPC32::SUBF, 2, ResultReg).addReg(TmpReg2).addReg(Op0Reg); + } +} + + +/// Shift instructions: 'shl', 'sar', 'shr' - Some special cases here +/// for constant immediate shift values, and for constant immediate +/// shift values equal to 1. Even the general case is sort of special, +/// because the shift amount has to be in CL, not just any old register. +/// +void ISel::visitShiftInst(ShiftInst &I) { + MachineBasicBlock::iterator IP = BB->end (); + emitShiftOperation (BB, IP, I.getOperand (0), I.getOperand (1), + I.getOpcode () == Instruction::Shl, I.getType (), + getReg (I)); +} + +/// emitShiftOperation - Common code shared between visitShiftInst and +/// constant expression support. +void ISel::emitShiftOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Op, Value *ShiftAmount, bool isLeftShift, + const Type *ResultTy, unsigned DestReg) { + unsigned SrcReg = getReg (Op, MBB, IP); + bool isSigned = ResultTy->isSigned (); + unsigned Class = getClass (ResultTy); + + // Longs, as usual, are handled specially... + if (Class == cLong) { + // If we have a constant shift, we can generate much more efficient code + // than otherwise... 
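+    // For a constant amount n < 32 each destination word is assembled from a
+    // rotate pair (rlwinm plus an rlwimi insert), e.g. for a left shift:
+    //   hi' = (hi << n) | (lo >> (32-n)),  lo' = lo << n
+    // For n >= 32 the shift collapses to moving one (shifted) word across and
+    // filling the other.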
+ // + if (ConstantUInt *CUI = dyn_cast(ShiftAmount)) { + unsigned Amount = CUI->getValue(); + if (Amount < 32) { + if (isLeftShift) { + // FIXME: RLWIMI is a use-and-def of DestReg+1, but that violates SSA + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg+1).addReg(SrcReg+1).addImm(Amount).addImm(0).addImm(31-Amount); + BuildMI(*MBB, IP, PPC32::RLWIMI, 5).addReg(DestReg+1).addReg(SrcReg).addImm(Amount).addImm(32-Amount).addImm(31); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addImm(Amount).addImm(0).addImm(31-Amount); + } else { + // FIXME: RLWIMI is a use-and-def of DestReg, but that violates SSA + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addImm(32-Amount).addImm(Amount).addImm(31); + BuildMI(*MBB, IP, PPC32::RLWIMI, 5).addReg(DestReg).addReg(SrcReg+1).addImm(32-Amount).addImm(0).addImm(Amount-1); + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg+1).addReg(SrcReg+1).addImm(32-Amount).addImm(Amount).addImm(31); + } + } else { // Shifting more than 32 bits + Amount -= 32; + if (isLeftShift) { + if (Amount != 0) { + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg+1).addReg(SrcReg).addImm(Amount).addImm(0).addImm(31-Amount); + } else { + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(SrcReg).addReg(SrcReg); + } + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg).addReg(PPC32::R0).addImm(0); + } else { + if (Amount != 0) { + if (isSigned) + BuildMI(*MBB, IP, PPC32::SRAWI, 2, DestReg).addReg(SrcReg+1).addImm(Amount); + else + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg+1).addImm(32-Amount).addImm(Amount).addImm(31); + } else { + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg+1).addReg(SrcReg+1); + } + BuildMI(*MBB, IP, PPC32::ADDI, 2, DestReg+1).addReg(PPC32::R0).addImm(0); + } + } + } else { + unsigned TmpReg1 = makeAnotherReg(Type::IntTy); + unsigned TmpReg2 = makeAnotherReg(Type::IntTy); + unsigned TmpReg3 = makeAnotherReg(Type::IntTy); + unsigned TmpReg4 = makeAnotherReg(Type::IntTy); + unsigned TmpReg5 = makeAnotherReg(Type::IntTy); + unsigned TmpReg6 = makeAnotherReg(Type::IntTy); + unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP); + + if (isLeftShift) { + BuildMI(*MBB, IP, PPC32::SUBFIC, 2, TmpReg1).addReg(ShiftAmountReg).addImm(32); + BuildMI(*MBB, IP, PPC32::SLW, 2, TmpReg2).addReg(SrcReg+1).addReg(ShiftAmountReg); + BuildMI(*MBB, IP, PPC32::SRW, 2, TmpReg3).addReg(SrcReg).addReg(TmpReg1); + BuildMI(*MBB, IP, PPC32::OR, 2, TmpReg4).addReg(TmpReg2).addReg(TmpReg3); + BuildMI(*MBB, IP, PPC32::ADDI, 2, TmpReg5).addReg(ShiftAmountReg).addImm(-32); + BuildMI(*MBB, IP, PPC32::SLW, 2, TmpReg6).addReg(SrcReg).addReg(TmpReg5); + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg+1).addReg(TmpReg4).addReg(TmpReg6); + BuildMI(*MBB, IP, PPC32::SLW, 2, DestReg).addReg(SrcReg).addReg(ShiftAmountReg); + } else { + if (isSigned) { + // FIXME: Unimplmented + // Page C-3 of the PowerPC 32bit Programming Environments Manual + } else { + BuildMI(*MBB, IP, PPC32::SUBFIC, 2, TmpReg1).addReg(ShiftAmountReg).addImm(32); + BuildMI(*MBB, IP, PPC32::SRW, 2, TmpReg2).addReg(SrcReg).addReg(ShiftAmountReg); + BuildMI(*MBB, IP, PPC32::SLW, 2, TmpReg3).addReg(SrcReg+1).addReg(TmpReg1); + BuildMI(*MBB, IP, PPC32::OR, 2, TmpReg4).addReg(TmpReg2).addReg(TmpReg3); + BuildMI(*MBB, IP, PPC32::ADDI, 2, TmpReg5).addReg(ShiftAmountReg).addImm(-32); + BuildMI(*MBB, IP, PPC32::SRW, 2, TmpReg6).addReg(SrcReg+1).addReg(TmpReg5); + BuildMI(*MBB, IP, PPC32::OR, 2, DestReg).addReg(TmpReg4).addReg(TmpReg6); + BuildMI(*MBB, IP, PPC32::SRW, 2, DestReg+1).addReg(SrcReg+1).addReg(ShiftAmountReg); 
+ } + } + } + return; + } + + if (ConstantUInt *CUI = dyn_cast(ShiftAmount)) { + // The shift amount is constant, guaranteed to be a ubyte. Get its value. + assert(CUI->getType() == Type::UByteTy && "Shift amount not a ubyte?"); + unsigned Amount = CUI->getValue(); + + if (isLeftShift) { + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addImm(Amount).addImm(0).addImm(31-Amount); + } else { + if (isSigned) { + BuildMI(*MBB, IP, PPC32::SRAWI, 2, DestReg).addReg(SrcReg).addImm(Amount); + } else { + BuildMI(*MBB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addImm(32-Amount).addImm(Amount).addImm(31); + } + } + } else { // The shift amount is non-constant. + unsigned ShiftAmountReg = getReg (ShiftAmount, MBB, IP); + + if (isLeftShift) { + BuildMI(*MBB, IP, PPC32::SLW, 2, DestReg).addReg(SrcReg).addReg(ShiftAmountReg); + } else { + BuildMI(*MBB, IP, isSigned ? PPC32::SRAW : PPC32::SRW, 2, DestReg).addReg(SrcReg).addReg(ShiftAmountReg); + } + } +} + + +/// visitLoadInst - Implement LLVM load instructions +/// +void ISel::visitLoadInst(LoadInst &I) { + static const unsigned Opcodes[] = { PPC32::LBZ, PPC32::LHZ, PPC32::LWZ, PPC32::LFS }; + unsigned Class = getClassB(I.getType()); + unsigned Opcode = Opcodes[Class]; + if (I.getType() == Type::DoubleTy) Opcode = PPC32::LFD; + + unsigned DestReg = getReg(I); + + if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(0))) { + unsigned FI = getFixedSizedAllocaFI(AI); + if (Class == cLong) { + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, DestReg), FI); + addFrameReference(BuildMI(BB, PPC32::LWZ, 2, DestReg+1), FI, 4); + } else { + addFrameReference(BuildMI(BB, Opcode, 2, DestReg), FI); + } + } else { + unsigned SrcAddrReg = getReg(I.getOperand(0)); + + if (Class == cLong) { + BuildMI(BB, PPC32::LWZ, 2, DestReg).addImm(0).addReg(SrcAddrReg); + BuildMI(BB, PPC32::LWZ, 2, DestReg+1).addImm(4).addReg(SrcAddrReg); + } else { + BuildMI(BB, Opcode, 2, DestReg).addImm(0).addReg(SrcAddrReg); + } + } +} + +/// visitStoreInst - Implement LLVM store instructions +/// +void ISel::visitStoreInst(StoreInst &I) { + unsigned ValReg = getReg(I.getOperand(0)); + unsigned AddressReg = getReg(I.getOperand(1)); + + const Type *ValTy = I.getOperand(0)->getType(); + unsigned Class = getClassB(ValTy); + + if (Class == cLong) { + BuildMI(BB, PPC32::STW, 3).addReg(ValReg).addImm(0).addReg(AddressReg); + BuildMI(BB, PPC32::STW, 3).addReg(ValReg+1).addImm(4).addReg(AddressReg); + return; + } + + static const unsigned Opcodes[] = { + PPC32::STB, PPC32::STH, PPC32::STW, PPC32::STFS + }; + unsigned Opcode = Opcodes[Class]; + if (ValTy == Type::DoubleTy) Opcode = PPC32::STFD; + BuildMI(BB, Opcode, 3).addReg(ValReg).addImm(0).addReg(AddressReg); +} + + +/// visitCastInst - Here we have various kinds of copying with or without sign +/// extension going on. +/// +void ISel::visitCastInst(CastInst &CI) { + Value *Op = CI.getOperand(0); + + unsigned SrcClass = getClassB(Op->getType()); + unsigned DestClass = getClassB(CI.getType()); + // Noop casts are not emitted: getReg will return the source operand as the + // register to use for any uses of the noop cast. + if (DestClass == SrcClass) + return; + + // If this is a cast from a 32-bit integer to a Long type, and the only uses + // of the case are GEP instructions, then the cast does not need to be + // generated explicitly, it will be folded into the GEP. 
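+  // (emitGEPOperation below looks through casts whose operand is int or uint
+  // and indexes with the 32-bit register directly, which is what makes this
+  // fold safe.)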
+ if (DestClass == cLong && SrcClass == cInt) { + bool AllUsesAreGEPs = true; + for (Value::use_iterator I = CI.use_begin(), E = CI.use_end(); I != E; ++I) + if (!isa(*I)) { + AllUsesAreGEPs = false; + break; + } + + // No need to codegen this cast if all users are getelementptr instrs... + if (AllUsesAreGEPs) return; + } + + unsigned DestReg = getReg(CI); + MachineBasicBlock::iterator MI = BB->end(); + emitCastOperation(BB, MI, Op, CI.getType(), DestReg); +} + +/// emitCastOperation - Common code shared between visitCastInst and constant +/// expression cast support. +/// +void ISel::emitCastOperation(MachineBasicBlock *BB, + MachineBasicBlock::iterator IP, + Value *Src, const Type *DestTy, + unsigned DestReg) { + const Type *SrcTy = Src->getType(); + unsigned SrcClass = getClassB(SrcTy); + unsigned DestClass = getClassB(DestTy); + unsigned SrcReg = getReg(Src, BB, IP); + + // Implement casts to bool by using compare on the operand followed by set if + // not zero on the result. + if (DestTy == Type::BoolTy) { + switch (SrcClass) { + case cByte: + case cShort: + case cInt: { + unsigned TmpReg = makeAnotherReg(Type::IntTy); + BuildMI(*BB, IP, PPC32::ADDIC, 2, TmpReg).addReg(SrcReg).addImm(-1); + BuildMI(*BB, IP, PPC32::SUBFE, 2, DestReg).addReg(TmpReg).addReg(SrcReg); + break; + } + case cLong: { + unsigned TmpReg = makeAnotherReg(Type::IntTy); + unsigned SrcReg2 = makeAnotherReg(Type::IntTy); + BuildMI(*BB, IP, PPC32::OR, 2, SrcReg2).addReg(SrcReg).addReg(SrcReg+1); + BuildMI(*BB, IP, PPC32::ADDIC, 2, TmpReg).addReg(SrcReg2).addImm(-1); + BuildMI(*BB, IP, PPC32::SUBFE, 2, DestReg).addReg(TmpReg).addReg(SrcReg2); + break; + } + case cFP: + // FIXME + // Load -0.0 + // Compare + // move to CR1 + // Negate -0.0 + // Compare + // CROR + // MFCR + // Left-align + // SRA ? + break; + } + return; + } + + // Implement casts between values of the same type class (as determined by + // getClass) by using a register-to-register move. + if (SrcClass == DestClass) { + if (SrcClass <= cInt) { + BuildMI(*BB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg); + } else if (SrcClass == cFP && SrcTy == DestTy) { + BuildMI(*BB, IP, PPC32::FMR, 1, DestReg).addReg(SrcReg); + } else if (SrcClass == cFP) { + if (SrcTy == Type::FloatTy) { // float -> double + assert(DestTy == Type::DoubleTy && "Unknown cFP member!"); + BuildMI(*BB, IP, PPC32::FMR, 1, DestReg).addReg(SrcReg); + } else { // double -> float + assert(SrcTy == Type::DoubleTy && DestTy == Type::FloatTy && + "Unknown cFP member!"); + BuildMI(*BB, IP, PPC32::FRSP, 1, DestReg).addReg(SrcReg); + } + } else if (SrcClass == cLong) { + BuildMI(*BB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg); + BuildMI(*BB, IP, PPC32::OR, 2, DestReg+1).addReg(SrcReg+1).addReg(SrcReg+1); + } else { + assert(0 && "Cannot handle this type of cast instruction!"); + abort(); + } + return; + } + + // Handle cast of SMALLER int to LARGER int using a move with sign extension + // or zero extension, depending on whether the source type was signed. + if (SrcClass <= cInt && (DestClass <= cInt || DestClass == cLong) && + SrcClass < DestClass) { + bool isLong = DestClass == cLong; + if (isLong) DestClass = cInt; + + bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy; + if (SrcClass < cInt) { + if (isUnsigned) { + unsigned shift = (SrcClass == cByte) ? 24 : 16; + BuildMI(*BB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addZImm(0).addImm(shift).addImm(31); + } else { + BuildMI(*BB, IP, (SrcClass == cByte) ? 
PPC32::EXTSB : PPC32::EXTSH, 1, DestReg).addReg(SrcReg); + } + } else { + BuildMI(*BB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg); + } + + if (isLong) { // Handle upper 32 bits as appropriate... + if (isUnsigned) // Zero out top bits... + BuildMI(*BB, IP, PPC32::ADDI, 2, DestReg+1).addReg(PPC32::R0).addImm(0); + else // Sign extend bottom half... + BuildMI(*BB, IP, PPC32::SRAWI, 2, DestReg+1).addReg(DestReg).addImm(31); + } + return; + } + + // Special case long -> int ... + if (SrcClass == cLong && DestClass == cInt) { + BuildMI(*BB, IP, PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg); + return; + } + + // Handle cast of LARGER int to SMALLER int with a clear or sign extend + if ((SrcClass <= cInt || SrcClass == cLong) && DestClass <= cInt + && SrcClass > DestClass) { + bool isUnsigned = SrcTy->isUnsigned() || SrcTy == Type::BoolTy; + if (isUnsigned) { + unsigned shift = (SrcClass == cByte) ? 24 : 16; + BuildMI(*BB, IP, PPC32::RLWINM, 4, DestReg).addReg(SrcReg).addZImm(0).addImm(shift).addImm(31); + } else { + BuildMI(*BB, IP, (SrcClass == cByte) ? PPC32::EXTSB : PPC32::EXTSH, 1, DestReg).addReg(SrcReg); + } + return; + } + + // Handle casts from integer to floating point now... + if (DestClass == cFP) { + + // Emit a library call for long to float conversion + if (SrcClass == cLong) { + std::vector Args; + Args.push_back(ValueRecord(SrcReg, SrcTy)); + MachineInstr *TheCall = BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("__floatdidf", true); + doCall(ValueRecord(DestReg, DestTy), TheCall, Args); + return; + } + + unsigned TmpReg = makeAnotherReg(Type::IntTy); + switch (SrcTy->getPrimitiveID()) { + case Type::BoolTyID: + case Type::SByteTyID: + BuildMI(*BB, IP, PPC32::EXTSB, 1, TmpReg).addReg(SrcReg); + break; + case Type::UByteTyID: + BuildMI(*BB, IP, PPC32::RLWINM, 4, TmpReg).addReg(SrcReg).addZImm(0).addImm(24).addImm(31); + break; + case Type::ShortTyID: + BuildMI(*BB, IP, PPC32::EXTSB, 1, TmpReg).addReg(SrcReg); + break; + case Type::UShortTyID: + BuildMI(*BB, IP, PPC32::RLWINM, 4, TmpReg).addReg(SrcReg).addZImm(0).addImm(16).addImm(31); + break; + case Type::IntTyID: + BuildMI(*BB, IP, PPC32::OR, 2, TmpReg).addReg(SrcReg).addReg(SrcReg); + break; + case Type::UIntTyID: + BuildMI(*BB, IP, PPC32::OR, 2, TmpReg).addReg(SrcReg).addReg(SrcReg); + break; + default: // No promotion needed... + break; + } + + SrcReg = TmpReg; + + // Spill the integer to memory and reload it from there. 
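+    // The int-to-double conversion uses the classic exponent-bias trick: the
+    // 32-bit word is stored as the low half of a double whose high half is
+    // 0x43300000, so the slot reads back as the double 2^52 + n (in the
+    // signed case the word is first XORed with 0x80000000 and the constant
+    // becomes 2^52 + 2^31); an fsub of the matching constant then leaves the
+    // converted value.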
+ // Also spill room for a special conversion constant + int ConstantFrameIndex = + F->getFrameInfo()->CreateStackObject(Type::DoubleTy, TM.getTargetData()); + int ValueFrameIdx = + F->getFrameInfo()->CreateStackObject(Type::DoubleTy, TM.getTargetData()); + + unsigned constantHi = makeAnotherReg(Type::IntTy); + unsigned constantLo = makeAnotherReg(Type::IntTy); + unsigned ConstF = makeAnotherReg(Type::DoubleTy); + unsigned TempF = makeAnotherReg(Type::DoubleTy); + + if (!SrcTy->isSigned()) { + BuildMI(*BB, IP, PPC32::ADDIS, 2, constantHi).addReg(PPC32::R0).addImm(0x4330); + BuildMI(*BB, IP, PPC32::ADDI, 2, constantLo).addReg(PPC32::R0).addImm(0); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantHi), ConstantFrameIndex); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantLo), ConstantFrameIndex, 4); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantHi), ValueFrameIdx); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(SrcReg), ValueFrameIdx, 4); + addFrameReference(BuildMI(*BB, IP, PPC32::LFD, 2, ConstF), ConstantFrameIndex); + addFrameReference(BuildMI(*BB, IP, PPC32::LFD, 2, TempF), ValueFrameIdx); + BuildMI(*BB, IP, PPC32::FSUB, 2, DestReg).addReg(TempF).addReg(ConstF); + } else { + unsigned TempLo = makeAnotherReg(Type::IntTy); + BuildMI(*BB, IP, PPC32::ADDIS, 2, constantHi).addReg(PPC32::R0).addImm(0x4330); + BuildMI(*BB, IP, PPC32::ADDIS, 2, constantLo).addReg(PPC32::R0).addImm(0x8000); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantHi), ConstantFrameIndex); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantLo), ConstantFrameIndex, 4); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(constantHi), ValueFrameIdx); + BuildMI(*BB, IP, PPC32::XORIS, 2, TempLo).addReg(SrcReg).addImm(0x8000); + addFrameReference(BuildMI(*BB, IP, PPC32::STW, 3).addReg(TempLo), ValueFrameIdx, 4); + addFrameReference(BuildMI(*BB, IP, PPC32::LFD, 2, ConstF), ConstantFrameIndex); + addFrameReference(BuildMI(*BB, IP, PPC32::LFD, 2, TempF), ValueFrameIdx); + BuildMI(*BB, IP, PPC32::FSUB, 2, DestReg).addReg(TempF).addReg(ConstF); + } + return; + } + + // Handle casts from floating point to integer now... + if (SrcClass == cFP) { + + // emit library call + if (DestClass == cLong) { + std::vector Args; + Args.push_back(ValueRecord(SrcReg, SrcTy)); + MachineInstr *TheCall = BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("__fixdfdi", true); + doCall(ValueRecord(DestReg, DestTy), TheCall, Args); + return; + } + + int ValueFrameIdx = + F->getFrameInfo()->CreateStackObject(Type::DoubleTy, TM.getTargetData()); + + // load into 32 bit value, and then truncate as necessary + // FIXME: This is wrong for unsigned dest types + //if (DestTy->isSigned()) { + unsigned TempReg = makeAnotherReg(Type::DoubleTy); + BuildMI(*BB, IP, PPC32::FCTIWZ, 1, TempReg).addReg(SrcReg); + addFrameReference(BuildMI(*BB, IP, PPC32::STFD, 3).addReg(TempReg), ValueFrameIdx); + addFrameReference(BuildMI(*BB, IP, PPC32::LWZ, 2, DestReg), ValueFrameIdx+4); + //} else { + //} + + // FIXME: Truncate return value + return; + } + + // Anything we haven't handled already, we can't (yet) handle at all. + assert(0 && "Unhandled cast instruction!"); + abort(); +} + +/// visitVANextInst - Implement the va_next instruction... 
+/// +void ISel::visitVANextInst(VANextInst &I) { + unsigned VAList = getReg(I.getOperand(0)); + unsigned DestReg = getReg(I); + + unsigned Size; + switch (I.getArgType()->getPrimitiveID()) { + default: + std::cerr << I; + assert(0 && "Error: bad type for va_next instruction!"); + return; + case Type::PointerTyID: + case Type::UIntTyID: + case Type::IntTyID: + Size = 4; + break; + case Type::ULongTyID: + case Type::LongTyID: + case Type::DoubleTyID: + Size = 8; + break; + } + + // Increment the VAList pointer... + BuildMI(BB, PPC32::ADDI, 2, DestReg).addReg(VAList).addImm(Size); +} + +void ISel::visitVAArgInst(VAArgInst &I) { + unsigned VAList = getReg(I.getOperand(0)); + unsigned DestReg = getReg(I); + + switch (I.getType()->getPrimitiveID()) { + default: + std::cerr << I; + assert(0 && "Error: bad type for va_next instruction!"); + return; + case Type::PointerTyID: + case Type::UIntTyID: + case Type::IntTyID: + BuildMI(BB, PPC32::LWZ, 2, DestReg).addImm(0).addReg(VAList); + break; + case Type::ULongTyID: + case Type::LongTyID: + BuildMI(BB, PPC32::LWZ, 2, DestReg).addImm(0).addReg(VAList); + BuildMI(BB, PPC32::LWZ, 2, DestReg+1).addImm(4).addReg(VAList); + break; + case Type::DoubleTyID: + BuildMI(BB, PPC32::LFD, 2, DestReg).addImm(0).addReg(VAList); + break; + } +} + +/// visitGetElementPtrInst - instruction-select GEP instructions +/// +void ISel::visitGetElementPtrInst(GetElementPtrInst &I) { + unsigned outputReg = getReg(I); + emitGEPOperation(BB, BB->end(), I.getOperand(0),I.op_begin()+1, I.op_end(), outputReg); +} + +void ISel::emitGEPOperation(MachineBasicBlock *MBB, + MachineBasicBlock::iterator IP, + Value *Src, User::op_iterator IdxBegin, + User::op_iterator IdxEnd, unsigned TargetReg) { + const TargetData &TD = TM.getTargetData(); + if (ConstantPointerRef *CPR = dyn_cast(Src)) + Src = CPR->getValue(); + + std::vector GEPOps; + GEPOps.resize(IdxEnd-IdxBegin+1); + GEPOps[0] = Src; + std::copy(IdxBegin, IdxEnd, GEPOps.begin()+1); + + std::vector GEPTypes; + GEPTypes.assign(gep_type_begin(Src->getType(), IdxBegin, IdxEnd), + gep_type_end(Src->getType(), IdxBegin, IdxEnd)); + + // Keep emitting instructions until we consume the entire GEP instruction. + while (!GEPOps.empty()) { + // It's an array or pointer access: [ArraySize x ElementType]. + const SequentialType *SqTy = cast(GEPTypes.back()); + Value *idx = GEPOps.back(); + GEPOps.pop_back(); // Consume a GEP operand + GEPTypes.pop_back(); + + // Many GEP instructions use a [cast (int/uint) to LongTy] as their + // operand on X86. Handle this case directly now... + if (CastInst *CI = dyn_cast(idx)) + if (CI->getOperand(0)->getType() == Type::IntTy || + CI->getOperand(0)->getType() == Type::UIntTy) + idx = CI->getOperand(0); + + // We want to add BaseReg to(idxReg * sizeof ElementType). First, we + // must find the size of the pointed-to type (Not coincidentally, the next + // type is the type of the elements in the array). + const Type *ElTy = SqTy->getElementType(); + unsigned elementSize = TD.getTypeSize(ElTy); + + if (elementSize == 1) { + // If the element size is 1, we don't have to multiply, just add + unsigned idxReg = getReg(idx, MBB, IP); + unsigned Reg = makeAnotherReg(Type::UIntTy); + BuildMI(*MBB, IP, PPC32::ADD, 2,TargetReg).addReg(Reg).addReg(idxReg); + --IP; // Insert the next instruction before this one. 
+    if (elementSize == 1) {
+      // If the element size is 1, we don't have to multiply, just add
+      unsigned idxReg = getReg(idx, MBB, IP);
+      unsigned Reg = makeAnotherReg(Type::UIntTy);
+      BuildMI(*MBB, IP, PPC32::ADD, 2, TargetReg).addReg(Reg).addReg(idxReg);
+      --IP;            // Insert the next instruction before this one.
+      TargetReg = Reg; // Codegen the rest of the GEP into this
+    } else {
+      unsigned idxReg = getReg(idx, MBB, IP);
+      unsigned OffsetReg = makeAnotherReg(Type::UIntTy);
+
+      // Make sure we can back the iterator up to point to the first
+      // instruction emitted.
+      MachineBasicBlock::iterator BeforeIt = IP;
+      if (IP == MBB->begin())
+        BeforeIt = MBB->end();
+      else
+        --BeforeIt;
+      doMultiplyConst(MBB, IP, OffsetReg, Type::IntTy, idxReg, elementSize);
+
+      // Emit an ADD to add OffsetReg to the basePtr.
+      unsigned Reg = makeAnotherReg(Type::UIntTy);
+      BuildMI(*MBB, IP, PPC32::ADD, 2, TargetReg).addReg(Reg).addReg(OffsetReg);
+
+      // Step to the first instruction of the multiply.
+      if (BeforeIt == MBB->end())
+        IP = MBB->begin();
+      else
+        IP = ++BeforeIt;
+
+      TargetReg = Reg; // Codegen the rest of the GEP into this
+    }
+  }
+}
+
+/// visitAllocaInst - If this is a fixed size alloca, allocate space from the
+/// frame manager, otherwise do it the hard way.
+///
+void ISel::visitAllocaInst(AllocaInst &I) {
+  // If this is a fixed size alloca in the entry block for the function, we
+  // statically stack allocate the space, so we don't need to do anything here.
+  //
+  if (dyn_castFixedAlloca(&I)) return;
+
+  // Find the data size of the alloca inst's getAllocatedType.
+  const Type *Ty = I.getAllocatedType();
+  unsigned TySize = TM.getTargetData().getTypeSize(Ty);
+
+  // Create a register to hold the temporary result of multiplying the type size
+  // constant by the variable amount.
+  unsigned TotalSizeReg = makeAnotherReg(Type::UIntTy);
+  unsigned SrcReg1 = getReg(I.getArraySize());
+
+  // TotalSizeReg = mul <NumElements>, <TypeSize>
+  MachineBasicBlock::iterator MBBI = BB->end();
+  doMultiplyConst(BB, MBBI, TotalSizeReg, Type::UIntTy, SrcReg1, TySize);
+
+  // AddedSize = add <TotalSizeReg>, 15
+  unsigned AddedSizeReg = makeAnotherReg(Type::UIntTy);
+  BuildMI(BB, PPC32::ADD, 2, AddedSizeReg).addReg(TotalSizeReg).addImm(15);
+
+  // AlignedSize = and <AddedSizeReg>, ~15  (the rotate-and-mask below keeps
+  // bits 0..27, clearing the low four bits, i.e. rounding down to 16 bytes)
+  unsigned AlignedSize = makeAnotherReg(Type::UIntTy);
+  BuildMI(BB, PPC32::RLWNM, 4, AlignedSize).addReg(AddedSizeReg).addImm(0).addImm(0).addImm(27);
+
+  // Subtract size from stack pointer, thereby allocating some space.
+  BuildMI(BB, PPC32::SUB, 2, PPC32::R1).addReg(PPC32::R1).addReg(AlignedSize);
+
+  // Put a pointer to the space into the result register, by copying
+  // the stack pointer.
+  BuildMI(BB, PPC32::OR, 2, getReg(I)).addReg(PPC32::R1).addReg(PPC32::R1);
+
+  // Inform the Frame Information that we have just allocated a variable-sized
+  // object.
+  F->getFrameInfo()->CreateVariableSizedObject();
+}
+
+/// visitMallocInst - Malloc instructions are code generated into direct calls
+/// to the library malloc.
+///
+void ISel::visitMallocInst(MallocInst &I) {
+  unsigned AllocSize = TM.getTargetData().getTypeSize(I.getAllocatedType());
+  unsigned Arg;
+
+  if (ConstantUInt *C = dyn_cast<ConstantUInt>(I.getOperand(0))) {
+    Arg = getReg(ConstantUInt::get(Type::UIntTy, C->getValue() * AllocSize));
+  } else {
+    Arg = makeAnotherReg(Type::UIntTy);
+    unsigned Op0Reg = getReg(I.getOperand(0));
+    MachineBasicBlock::iterator MBBI = BB->end();
+    doMultiplyConst(BB, MBBI, Arg, Type::UIntTy, Op0Reg, AllocSize);
+  }
+
+  std::vector<ValueRecord> Args;
+  Args.push_back(ValueRecord(Arg, Type::UIntTy));
+  MachineInstr *TheCall = BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("malloc", true);
+  doCall(ValueRecord(getReg(I), I.getType()), TheCall, Args);
+}
+
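+// visitMallocInst above and visitFreeInst below share the same libcall
+// lowering pattern: wrap the operands in ValueRecords, build a CALLpcrel to
+// the external symbol, and let doCall() finish the call sequence.  The code
+// that results is equivalent to plain C calls (illustrative sketch only):
+//
+//   void *lowered_malloc(unsigned NumElements, unsigned ElementSize) {
+//     return malloc(NumElements * ElementSize);
+//   }
+//   void lowered_free(void *Ptr) {
+//     free(Ptr);
+//   }
+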
+/// visitFreeInst - Free instructions are code gen'd to call the free libc
+/// function.
+///
+void ISel::visitFreeInst(FreeInst &I) {
+  std::vector<ValueRecord> Args;
+  Args.push_back(ValueRecord(I.getOperand(0)));
+  MachineInstr *TheCall = BuildMI(PPC32::CALLpcrel, 1).addExternalSymbol("free", true);
+  doCall(ValueRecord(0, Type::VoidTy), TheCall, Args);
+}
+
+/// createPPCSimpleInstructionSelector - This pass converts an LLVM function
+/// into a machine code representation in a very simple peep-hole fashion.  The
+/// generated code sucks but the implementation is nice and simple.
+///
+FunctionPass *llvm::createPPCSimpleInstructionSelector(TargetMachine &TM) {
+  return new ISel(TM);
+}
diff --git a/lib/Target/PowerPC/PowerPCInstrInfo.cpp b/lib/Target/PowerPC/PowerPCInstrInfo.cpp
new file mode 100644
index 00000000000..8340e783a67
--- /dev/null
+++ b/lib/Target/PowerPC/PowerPCInstrInfo.cpp
@@ -0,0 +1,22 @@
+//===- PowerPCInstrInfo.cpp - PowerPC Instruction Information ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PowerPC implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PowerPCInstrInfo.h"
+#include "PowerPC.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "PowerPCGenInstrInfo.inc"
+using namespace llvm;
+
+PowerPCInstrInfo::PowerPCInstrInfo()
+  : TargetInstrInfo(PowerPCInsts, sizeof(PowerPCInsts)/sizeof(PowerPCInsts[0])) {
+}
diff --git a/lib/Target/PowerPC/PowerPCInstrInfo.h b/lib/Target/PowerPC/PowerPCInstrInfo.h
new file mode 100644
index 00000000000..d50e05e85fa
--- /dev/null
+++ b/lib/Target/PowerPC/PowerPCInstrInfo.h
@@ -0,0 +1,78 @@
+//===- PowerPCInstrInfo.h - PowerPC Instruction Information -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PowerPC implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef POWERPCINSTRUCTIONINFO_H
+#define POWERPCINSTRUCTIONINFO_H
+
+#include "llvm/Target/TargetInstrInfo.h"
+#include "PowerPCRegisterInfo.h"
+
+namespace llvm {
+
+namespace PPC32II {
+  enum {
+    ArgCountShift = 0,
+    ArgCountMask = 7,
+
+    Arg0TypeShift = 3,
+    Arg1TypeShift = 8,
+    Arg2TypeShift = 13,
+    Arg3TypeShift = 18,
+    Arg4TypeShift = 23,
+    VMX = 1<<28,
+    PPC64 = 1<<29,
+    ArgTypeMask = 31
+  };
+
+  enum {
+    None = 0,
+    Gpr = 1,
+    Gpr0 = 2,
+    Simm16 = 3,
+    Zimm16 = 4,
+    PCRelimm24 = 5,
+    Imm24 = 6,
+    Imm5 = 7,
+    PCRelimm14 = 8,
+    Imm14 = 9,
+    Imm2 = 10,
+    Crf = 11,
+    Imm3 = 12,
+    Imm1 = 13,
+    Fpr = 14,
+    Imm4 = 15,
+    Imm8 = 16,
+    Disimm16 = 17,
+    Disimm14 = 18,
+    Spr = 19,
+    Sgr = 20,
+    Imm15 = 21,
+    Vpr = 22
+  };
+}
+
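+// These enums describe how per-instruction flags are meant to be packed: the
+// low three bits hold the operand count and each operand's type sits in its
+// own five-bit field starting at Arg0TypeShift.  A consumer of the flags
+// would decode them roughly like this (illustrative only; the enums above are
+// the authoritative layout):
+//
+//   unsigned NumOps  = Flags & PPC32II::ArgCountMask;
+//   unsigned Op0Type = (Flags >> PPC32II::Arg0TypeShift) & PPC32II::ArgTypeMask;
+//   unsigned Op1Type = (Flags >> PPC32II::Arg1TypeShift) & PPC32II::ArgTypeMask;
+//   bool     Is64Bit = (Flags & PPC32II::PPC64) != 0;
+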
+class PowerPCInstrInfo : public TargetInstrInfo {
+  const PowerPCRegisterInfo RI;
+public:
+  PowerPCInstrInfo();
+
+  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info.  As
+  /// such, whenever a client has an instance of instruction info, it should
+  /// always be able to get register info as well (through this method).
+  ///
+  virtual const MRegisterInfo &getRegisterInfo() const { return RI; }
+};
+
+}
+
+#endif
diff --git a/lib/Target/PowerPC/PowerPCRegisterInfo.cpp b/lib/Target/PowerPC/PowerPCRegisterInfo.cpp
new file mode 100644
index 00000000000..45dcf74f20e
--- /dev/null
+++ b/lib/Target/PowerPC/PowerPCRegisterInfo.cpp
@@ -0,0 +1,268 @@
+//===- PowerPCRegisterInfo.cpp - PowerPC Register Information ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PowerPC implementation of the MRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "PowerPC.h"
+#include "PowerPCRegisterInfo.h"
+#include "PowerPCInstrBuilder.h"
+#include "llvm/Constants.h"
+#include "llvm/Type.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetFrameInfo.h"
+#include "Support/CommandLine.h"
+#include "Support/STLExtras.h"
+using namespace llvm;
+
+namespace {
+  cl::opt<bool>
+  NoFPElim("disable-fp-elim",
+           cl::desc("Disable frame pointer elimination optimization"));
+}
+
+PowerPCRegisterInfo::PowerPCRegisterInfo()
+  : PowerPCGenRegisterInfo(PPC32::ADJCALLSTACKDOWN,
+                           PPC32::ADJCALLSTACKUP) {}
+
+static unsigned getIdx(const TargetRegisterClass *RC) {
+  if (RC == PowerPC::GPRCRegisterClass) {
+    switch (RC->getSize()) {
+    default: assert(0 && "Invalid data size!");
+    case 1:  return 0;
+    case 2:  return 1;
+    case 4:  return 2;
+    }
+  } else if (RC == PowerPC::FPRCRegisterClass) {
+    switch (RC->getSize()) {
+    default: assert(0 && "Invalid data size!");
+    case 4:  return 3;
+    case 8:  return 4;
+    }
+  }
+  abort();
+}
+
+int PowerPCRegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
+                                             MachineBasicBlock::iterator MI,
+                                             unsigned SrcReg, int FrameIdx,
+                                             const TargetRegisterClass *RC) const {
+  static const unsigned Opcode[] =
+    { PPC32::STB, PPC32::STH, PPC32::STW, PPC32::STFS, PPC32::STFD };
+  unsigned OC = Opcode[getIdx(RC)];
+  MBB.insert(MI, addFrameReference(BuildMI(OC, 3).addReg(SrcReg), FrameIdx));
+  return 1;
+}
+
+int PowerPCRegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
+                                              MachineBasicBlock::iterator MI,
+                                              unsigned DestReg, int FrameIdx,
+                                              const TargetRegisterClass *RC) const {
+  static const unsigned Opcode[] =
+    { PPC32::LBZ, PPC32::LHZ, PPC32::LWZ, PPC32::LFS, PPC32::LFD };
+  unsigned OC = Opcode[getIdx(RC)];
+  MBB.insert(MI, addFrameReference(BuildMI(OC, 2, DestReg), FrameIdx));
+  return 1;
+}
+
+int PowerPCRegisterInfo::copyRegToReg(MachineBasicBlock &MBB,
+                                      MachineBasicBlock::iterator MI,
+                                      unsigned DestReg, unsigned SrcReg,
+                                      const TargetRegisterClass *RC) const {
+  MachineInstr *I;
+
+  if (RC == PowerPC::GPRCRegisterClass) {
+    I = BuildMI(PPC32::OR, 2, DestReg).addReg(SrcReg).addReg(SrcReg);
+  } else if (RC == PowerPC::FPRCRegisterClass) {
+    I = BuildMI(PPC32::FMR, 1, DestReg).addReg(SrcReg);
+  } else {
+    std::cerr << "Attempt to copy register that is not GPR or FPR";
+    abort();
+  }
+  MBB.insert(MI, I);
+  return 1;
+}
+
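+// Note that PowerPC has no dedicated register-to-register move instruction:
+// "mr rD, rS" is the extended mnemonic for "or rD, rS, rS", which is why
+// copyRegToReg emits an OR with both source operands equal (and FMR for the
+// floating-point class).  The spill/reload helpers simply index the natural
+// load/store opcode for the register class and size; e.g. a caller spilling a
+// 4-byte GPR would do roughly this and get an STW (illustrative only):
+//
+//   RegInfo.storeRegToStackSlot(MBB, InsertPt, SrcReg, FrameIdx,
+//                               PowerPC::GPRCRegisterClass);
+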
+//===----------------------------------------------------------------------===//
+// Stack Frame Processing methods
+//===----------------------------------------------------------------------===//
+
+// hasFP - Return true if the specified function should have a dedicated frame
+// pointer register.  This is true if the function has variable sized allocas
+// or if frame pointer elimination is disabled.
+//
+static bool hasFP(MachineFunction &MF) {
+  return NoFPElim || MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+void PowerPCRegisterInfo::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+                              MachineBasicBlock::iterator I) const {
+  if (hasFP(MF)) {
+    // If we have a frame pointer, turn the adjcallstackdown instruction into
+    // 'sub r1, r1, <amt>' and the adjcallstackup instruction into
+    // 'add r1, r1, <amt>'.
+    MachineInstr *Old = I;
+    int Amount = Old->getOperand(0).getImmedValue();
+    if (Amount != 0) {
+      // We need to keep the stack aligned properly.  To do this, we round the
+      // amount of space needed for the outgoing arguments up to the next
+      // alignment boundary.
+      unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
+      Amount = (Amount+Align-1)/Align*Align;
+
+      MachineInstr *New;
+      if (Old->getOpcode() == PPC32::ADJCALLSTACKDOWN) {
+        New = BuildMI(PPC32::ADDI, 2, PPC32::R1).addReg(PPC32::R1).addSImm(-Amount);
+      } else {
+        assert(Old->getOpcode() == PPC32::ADJCALLSTACKUP);
+        New = BuildMI(PPC32::ADDI, 2, PPC32::R1).addReg(PPC32::R1).addSImm(Amount);
+      }
+
+      // Replace the pseudo instruction with a new instruction...
+      MBB.insert(I, New);
+    }
+  }
+
+  MBB.erase(I);
+}
+
+void
+PowerPCRegisterInfo::eliminateFrameIndex(MachineFunction &MF,
+                                         MachineBasicBlock::iterator II) const {
+  unsigned i = 0;
+  MachineInstr &MI = *II;
+  while (!MI.getOperand(i).isFrameIndex()) {
+    ++i;
+    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
+  }
+
+  int FrameIndex = MI.getOperand(i).getFrameIndex();
+
+  // This must be part of a four operand memory reference.  Replace the
+  // FrameIndex operand with the base register, GPR1.
+  MI.SetMachineOperandReg(i, PPC32::R1);
+
+  // Take into account whether it's an add or mem instruction
+  if (i == 2) i--;
+
+  // Now add the frame object offset to the offset from r1.
+  int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
+               MI.getOperand(i).getImmedValue() + 4;
+
+  if (!hasFP(MF))
+    Offset += MF.getFrameInfo()->getStackSize();
+
+  MI.SetMachineOperandConst(i-1, MachineOperand::MO_SignExtendedImmed, Offset);
+  std::cout << "offset = " << Offset << std::endl;
+}
+
+
+void PowerPCRegisterInfo::processFunctionBeforeFrameFinalized(
+  MachineFunction &MF) const {
+  // Do Nothing
+}
+
+void PowerPCRegisterInfo::emitPrologue(MachineFunction &MF) const {
+  MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB
+  MachineBasicBlock::iterator MBBI = MBB.begin();
+  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineInstr *MI;
+
+  // Get the number of bytes to allocate from the FrameInfo
+  unsigned NumBytes = MFI->getStackSize();
+
+  if (MFI->hasCalls()) {
+    // When we have no frame pointer, we reserve argument space for call sites
+    // in the function immediately on entry to the current function.  This
+    // eliminates the need for add/sub brackets around call sites.
+    //
+    NumBytes += MFI->getMaxCallFrameSize();
+
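+    // The expression below is the usual integer round-up idiom,
+    // (N + Align - 1) / Align * Align, applied to NumBytes+4 and then shifted
+    // back down by 4 so the rounding stays consistent with the extra 4-byte
+    // offset that eliminateFrameIndex applies to frame objects.  For example
+    // (illustrative only), with Align == 16:
+    //   NumBytes == 28  ->  ((28+4)+15)/16*16 - 4 == 28   (32 is already aligned)
+    //   NumBytes == 32  ->  ((32+4)+15)/16*16 - 4 == 44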
+    // Round the size to a multiple of the alignment (don't forget the 4 byte
+    // offset though).
+    unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
+    NumBytes = ((NumBytes+4)+Align-1)/Align*Align - 4;
+
+    // Store the incoming LR so it is preserved across calls
+    MI = BuildMI(PPC32::MovePCtoLR, 0, PPC32::LR).addReg(PPC32::LR);
+    MBB.insert(MBBI, MI);
+    MI = BuildMI(PPC32::MFSPR, 1, PPC32::R0).addImm(8);  // mfspr r0, 8 == mflr r0
+    MBB.insert(MBBI, MI);
+    MI = BuildMI(PPC32::STW, 3).addReg(PPC32::R0).addSImm(8).addReg(PPC32::R1);
+    MBB.insert(MBBI, MI);
+  }
+
+  // Update frame info to pretend that this is part of the stack...
+  MFI->setStackSize(NumBytes);
+
+  // adjust stack pointer: r1 -= numbytes
+  // (stwu also stores the old r1, the back chain, at the new stack top)
+  if (NumBytes) {
+    MI = BuildMI(PPC32::STWU, 2, PPC32::R1).addImm(-NumBytes).addReg(PPC32::R1);
+    MBB.insert(MBBI, MI);
+  }
+}
+
+void PowerPCRegisterInfo::emitEpilogue(MachineFunction &MF,
+                                       MachineBasicBlock &MBB) const {
+  const MachineFrameInfo *MFI = MF.getFrameInfo();
+  MachineBasicBlock::iterator MBBI = prior(MBB.end());
+  MachineInstr *MI;
+  assert(MBBI->getOpcode() == PPC32::BLR &&
+         "Can only insert epilog into returning blocks");
+
+  // Get the number of bytes allocated from the FrameInfo...
+  unsigned NumBytes = MFI->getStackSize();
+
+  // adjust stack pointer back: r1 += numbytes
+  if (NumBytes) {
+    MI = BuildMI(PPC32::ADDI, 2, PPC32::R1)
+           .addReg(PPC32::R1)
+           .addSImm(NumBytes);
+    MBB.insert(MBBI, MI);
+  }
+
+  // If we have calls, restore the LR value before we branch to it
+  if (MFI->hasCalls()) {
+    MI = BuildMI(PPC32::LWZ, 2, PPC32::R0).addSImm(8).addReg(PPC32::R1);
+    MBB.insert(MBBI, MI);
+    MI = BuildMI(PPC32::MTLR, 1).addReg(PPC32::R0);
+    MBB.insert(MBBI, MI);
+  }
+}
+
+
+#include "PowerPCGenRegisterInfo.inc"
+
+const TargetRegisterClass*
+PowerPCRegisterInfo::getRegClassForType(const Type* Ty) const {
+  switch (Ty->getPrimitiveID()) {
+  case Type::LongTyID:
+  case Type::ULongTyID: assert(0 && "Long values can't fit in registers!");
+  default:              assert(0 && "Invalid type to getClass!");
+  case Type::BoolTyID:
+  case Type::SByteTyID:
+  case Type::UByteTyID:
+  case Type::ShortTyID:
+  case Type::UShortTyID:
+  case Type::IntTyID:
+  case Type::UIntTyID:
+  case Type::PointerTyID: return &GPRCInstance;
+
+  case Type::FloatTyID:
+  case Type::DoubleTyID:  return &FPRCInstance;
+  }
+}
+
diff --git a/lib/Target/PowerPC/PowerPCRegisterInfo.h b/lib/Target/PowerPC/PowerPCRegisterInfo.h
new file mode 100644
index 00000000000..5cb39d48945
--- /dev/null
+++ b/lib/Target/PowerPC/PowerPCRegisterInfo.h
@@ -0,0 +1,58 @@
+//===- PowerPCRegisterInfo.h - PowerPC Register Information Impl -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the PowerPC implementation of the MRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef POWERPCREGISTERINFO_H
+#define POWERPCREGISTERINFO_H
+
+#include "llvm/Target/MRegisterInfo.h"
+#include "PowerPCGenRegisterInfo.h.inc"
+
+namespace llvm {
+
+class Type;
+
+struct PowerPCRegisterInfo : public PowerPCGenRegisterInfo {
+  PowerPCRegisterInfo();
+  const TargetRegisterClass* getRegClassForType(const Type* Ty) const;
+
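+  // getRegClassForType maps every register-sized LLVM type onto one of the
+  // two register classes: integer, bool, and pointer types go to GPRC, float
+  // and double go to FPRC, and 64-bit integers deliberately assert, since the
+  // 32-bit selector models them as pairs of GPRs instead.  For example
+  // (illustrative only):
+  //
+  //   const TargetRegisterClass *IC = RegInfo.getRegClassForType(Type::IntTy);    // GPRC
+  //   const TargetRegisterClass *FC = RegInfo.getRegClassForType(Type::DoubleTy); // FPRC
+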
+  /// Code Generation virtual methods...
+  int storeRegToStackSlot(MachineBasicBlock &MBB,
+                          MachineBasicBlock::iterator MBBI,
+                          unsigned SrcReg, int FrameIndex,
+                          const TargetRegisterClass *RC) const;
+
+  int loadRegFromStackSlot(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MBBI,
+                           unsigned DestReg, int FrameIndex,
+                           const TargetRegisterClass *RC) const;
+
+  int copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                   unsigned DestReg, unsigned SrcReg,
+                   const TargetRegisterClass *RC) const;
+
+  void eliminateCallFramePseudoInstr(MachineFunction &MF,
+                                     MachineBasicBlock &MBB,
+                                     MachineBasicBlock::iterator I) const;
+
+  void eliminateFrameIndex(MachineFunction &MF,
+                           MachineBasicBlock::iterator II) const;
+
+  void processFunctionBeforeFrameFinalized(MachineFunction &MF) const;
+
+  void emitPrologue(MachineFunction &MF) const;
+  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/PowerPC/PowerPCTargetMachine.h b/lib/Target/PowerPC/PowerPCTargetMachine.h
new file mode 100644
index 00000000000..8c2b73129e6
--- /dev/null
+++ b/lib/Target/PowerPC/PowerPCTargetMachine.h
@@ -0,0 +1,57 @@
+//===-- PowerPCTargetMachine.h - Define TargetMachine for PowerPC -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file was developed by the LLVM research group and is distributed under
+// the University of Illinois Open Source License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the PowerPC specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef POWERPCTARGETMACHINE_H
+#define POWERPCTARGETMACHINE_H
+
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetFrameInfo.h"
+#include "llvm/PassManager.h"
+#include "PowerPCInstrInfo.h"
+#include "PowerPCJITInfo.h"
+
+namespace llvm {
+
+class IntrinsicLowering;
+
+class PowerPCTargetMachine : public TargetMachine {
+  PowerPCInstrInfo InstrInfo;
+  TargetFrameInfo FrameInfo;
+  PowerPCJITInfo JITInfo;
+public:
+  PowerPCTargetMachine(const Module &M, IntrinsicLowering *IL);
+
+  virtual const PowerPCInstrInfo *getInstrInfo() const { return &InstrInfo; }
+  virtual const TargetFrameInfo *getFrameInfo() const { return &FrameInfo; }
+  virtual const MRegisterInfo *getRegisterInfo() const {
+    return &InstrInfo.getRegisterInfo();
+  }
+  virtual TargetJITInfo *getJITInfo() {
+    return &JITInfo;
+  }
+
+  /// addPassesToEmitMachineCode - Add passes to the specified pass manager to
+  /// get machine code emitted.  This uses a MachineCodeEmitter object to handle
+  /// actually outputting the machine code and resolving things like the address
+  /// of functions.  This method should return true if machine code emission is
+  /// not supported.
+  ///
+  virtual bool addPassesToEmitMachineCode(FunctionPassManager &PM,
+                                          MachineCodeEmitter &MCE);
+
+  virtual bool addPassesToEmitAssembly(PassManager &PM, std::ostream &Out);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/lib/Target/PowerPC/README.txt b/lib/Target/PowerPC/README.txt
new file mode 100644
index 00000000000..06fc979458e
--- /dev/null
+++ b/lib/Target/PowerPC/README.txt
@@ -0,0 +1,26 @@
+
+PowerPC backend skeleton
+------------------------
+
+Someday we'd like to have a PowerPC backend.  Unfortunately, this
+is not yet that day.
+ +This directory contains mainly stubs and placeholders; there is no +binary machine code emitter, no assembly writer, and no instruction +selector here. Most of the functions in these files call abort() +or fail assertions on purpose, just to reinforce the fact that they +don't work. + +If you want to use LLVM on the PowerPC *today*, use the C Backend +(llc -march=c). It generates C code that you can compile with the +native GCC compiler and run. A distant second choice would be the +Interpreter (lli --force-interpreter=true). + +A few things *are* really here, including: + * PowerPC register file definition in TableGen format + * PowerPC definitions of TargetMachine and other target-specific classes + +"Patches," as they say, "are accepted." + +$Date$ +