//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;

namespace {
  cl::opt<bool>
  NoFusing("disable-spill-fusing",
           cl::desc("Disable fusing of spill code into instructions"));
  cl::opt<bool>
  PrintFailedFusing("print-failed-fuse-candidates",
                    cl::desc("Print instructions that the allocator wants to"
                             " fuse, but the X86 backend currently can't"),
                    cl::Hidden);
  cl::opt<bool>
  ReMatPICStubLoad("remat-pic-stub-load",
                   cl::desc("Re-materialize load from stub in PIC mode"),
                   cl::init(false), cl::Hidden);
}

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
  SmallVector<unsigned,16> AmbEntries;
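
  // The tables below pair each foldable register-form opcode with the opcode
  // that performs the same operation on a memory operand, so spill code can
  // be fused directly into the instruction (see the -disable-spill-fusing
  // option above). A sketch of the scheme, as inferred from how the tables
  // are consumed in this constructor: RegOp2MemOpTable{2Addr,0,1,2} are keyed
  // by which operand the load/store is folded into, and MemOp2RegOpTable is
  // the reverse map used to unfold a memory-form instruction back into its
  // register form.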
  static const unsigned OpTbl2Addr[][2] = {
    { X86::ADC32ri, X86::ADC32mi },
    { X86::ADC32ri8, X86::ADC32mi8 },
    { X86::ADC32rr, X86::ADC32mr },
    { X86::ADC64ri32, X86::ADC64mi32 },
    { X86::ADC64ri8, X86::ADC64mi8 },
    { X86::ADC64rr, X86::ADC64mr },
    { X86::ADD16ri, X86::ADD16mi },
    { X86::ADD16ri8, X86::ADD16mi8 },
    { X86::ADD16rr, X86::ADD16mr },
    { X86::ADD32ri, X86::ADD32mi },
    { X86::ADD32ri8, X86::ADD32mi8 },
    { X86::ADD32rr, X86::ADD32mr },
    { X86::ADD64ri32, X86::ADD64mi32 },
    { X86::ADD64ri8, X86::ADD64mi8 },
    { X86::ADD64rr, X86::ADD64mr },
    { X86::ADD8ri, X86::ADD8mi },
    { X86::ADD8rr, X86::ADD8mr },
    { X86::AND16ri, X86::AND16mi },
    { X86::AND16ri8, X86::AND16mi8 },
    { X86::AND16rr, X86::AND16mr },
    { X86::AND32ri, X86::AND32mi },
    { X86::AND32ri8, X86::AND32mi8 },
    { X86::AND32rr, X86::AND32mr },
    { X86::AND64ri32, X86::AND64mi32 },
    { X86::AND64ri8, X86::AND64mi8 },
    { X86::AND64rr, X86::AND64mr },
    { X86::AND8ri, X86::AND8mi },
    { X86::AND8rr, X86::AND8mr },
    { X86::DEC16r, X86::DEC16m },
    { X86::DEC32r, X86::DEC32m },
    { X86::DEC64_16r, X86::DEC64_16m },
    { X86::DEC64_32r, X86::DEC64_32m },
    { X86::DEC64r, X86::DEC64m },
    { X86::DEC8r, X86::DEC8m },
    { X86::INC16r, X86::INC16m },
    { X86::INC32r, X86::INC32m },
    { X86::INC64_16r, X86::INC64_16m },
    { X86::INC64_32r, X86::INC64_32m },
    { X86::INC64r, X86::INC64m },
    { X86::INC8r, X86::INC8m },
    { X86::NEG16r, X86::NEG16m },
    { X86::NEG32r, X86::NEG32m },
    { X86::NEG64r, X86::NEG64m },
    { X86::NEG8r, X86::NEG8m },
    { X86::NOT16r, X86::NOT16m },
    { X86::NOT32r, X86::NOT32m },
    { X86::NOT64r, X86::NOT64m },
    { X86::NOT8r, X86::NOT8m },
    { X86::OR16ri, X86::OR16mi },
    { X86::OR16ri8, X86::OR16mi8 },
    { X86::OR16rr, X86::OR16mr },
    { X86::OR32ri, X86::OR32mi },
    { X86::OR32ri8, X86::OR32mi8 },
    { X86::OR32rr, X86::OR32mr },
    { X86::OR64ri32, X86::OR64mi32 },
    { X86::OR64ri8, X86::OR64mi8 },
    { X86::OR64rr, X86::OR64mr },
    { X86::OR8ri, X86::OR8mi },
    { X86::OR8rr, X86::OR8mr },
    { X86::ROL16r1, X86::ROL16m1 },
    { X86::ROL16rCL, X86::ROL16mCL },
    { X86::ROL16ri, X86::ROL16mi },
    { X86::ROL32r1, X86::ROL32m1 },
    { X86::ROL32rCL, X86::ROL32mCL },
    { X86::ROL32ri, X86::ROL32mi },
    { X86::ROL64r1, X86::ROL64m1 },
    { X86::ROL64rCL, X86::ROL64mCL },
    { X86::ROL64ri, X86::ROL64mi },
    { X86::ROL8r1, X86::ROL8m1 },
    { X86::ROL8rCL, X86::ROL8mCL },
    { X86::ROL8ri, X86::ROL8mi },
    { X86::ROR16r1, X86::ROR16m1 },
    { X86::ROR16rCL, X86::ROR16mCL },
    { X86::ROR16ri, X86::ROR16mi },
    { X86::ROR32r1, X86::ROR32m1 },
    { X86::ROR32rCL, X86::ROR32mCL },
    { X86::ROR32ri, X86::ROR32mi },
    { X86::ROR64r1, X86::ROR64m1 },
    { X86::ROR64rCL, X86::ROR64mCL },
    { X86::ROR64ri, X86::ROR64mi },
    { X86::ROR8r1, X86::ROR8m1 },
    { X86::ROR8rCL, X86::ROR8mCL },
    { X86::ROR8ri, X86::ROR8mi },
    { X86::SAR16r1, X86::SAR16m1 },
    { X86::SAR16rCL, X86::SAR16mCL },
    { X86::SAR16ri, X86::SAR16mi },
    { X86::SAR32r1, X86::SAR32m1 },
    { X86::SAR32rCL, X86::SAR32mCL },
    { X86::SAR32ri, X86::SAR32mi },
    { X86::SAR64r1, X86::SAR64m1 },
    { X86::SAR64rCL, X86::SAR64mCL },
    { X86::SAR64ri, X86::SAR64mi },
    { X86::SAR8r1, X86::SAR8m1 },
    { X86::SAR8rCL, X86::SAR8mCL },
    { X86::SAR8ri, X86::SAR8mi },
    { X86::SBB32ri, X86::SBB32mi },
    { X86::SBB32ri8, X86::SBB32mi8 },
    { X86::SBB32rr, X86::SBB32mr },
    { X86::SBB64ri32, X86::SBB64mi32 },
    { X86::SBB64ri8, X86::SBB64mi8 },
    { X86::SBB64rr, X86::SBB64mr },
    { X86::SHL16rCL, X86::SHL16mCL },
    { X86::SHL16ri, X86::SHL16mi },
    { X86::SHL32rCL, X86::SHL32mCL },
    { X86::SHL32ri, X86::SHL32mi },
    { X86::SHL64rCL, X86::SHL64mCL },
    { X86::SHL64ri, X86::SHL64mi },
    { X86::SHL8rCL, X86::SHL8mCL },
    { X86::SHL8ri, X86::SHL8mi },
    { X86::SHLD16rrCL, X86::SHLD16mrCL },
    { X86::SHLD16rri8, X86::SHLD16mri8 },
    { X86::SHLD32rrCL, X86::SHLD32mrCL },
    { X86::SHLD32rri8, X86::SHLD32mri8 },
    { X86::SHLD64rrCL, X86::SHLD64mrCL },
    { X86::SHLD64rri8, X86::SHLD64mri8 },
    { X86::SHR16r1, X86::SHR16m1 },
    { X86::SHR16rCL, X86::SHR16mCL },
    { X86::SHR16ri, X86::SHR16mi },
    { X86::SHR32r1, X86::SHR32m1 },
    { X86::SHR32rCL, X86::SHR32mCL },
    { X86::SHR32ri, X86::SHR32mi },
    { X86::SHR64r1, X86::SHR64m1 },
    { X86::SHR64rCL, X86::SHR64mCL },
    { X86::SHR64ri, X86::SHR64mi },
    { X86::SHR8r1, X86::SHR8m1 },
    { X86::SHR8rCL, X86::SHR8mCL },
    { X86::SHR8ri, X86::SHR8mi },
    { X86::SHRD16rrCL, X86::SHRD16mrCL },
    { X86::SHRD16rri8, X86::SHRD16mri8 },
    { X86::SHRD32rrCL, X86::SHRD32mrCL },
    { X86::SHRD32rri8, X86::SHRD32mri8 },
    { X86::SHRD64rrCL, X86::SHRD64mrCL },
    { X86::SHRD64rri8, X86::SHRD64mri8 },
    { X86::SUB16ri, X86::SUB16mi },
    { X86::SUB16ri8, X86::SUB16mi8 },
    { X86::SUB16rr, X86::SUB16mr },
    { X86::SUB32ri, X86::SUB32mi },
    { X86::SUB32ri8, X86::SUB32mi8 },
    { X86::SUB32rr, X86::SUB32mr },
    { X86::SUB64ri32, X86::SUB64mi32 },
    { X86::SUB64ri8, X86::SUB64mi8 },
    { X86::SUB64rr, X86::SUB64mr },
    { X86::SUB8ri, X86::SUB8mi },
    { X86::SUB8rr, X86::SUB8mr },
    { X86::XOR16ri, X86::XOR16mi },
    { X86::XOR16ri8, X86::XOR16mi8 },
    { X86::XOR16rr, X86::XOR16mr },
    { X86::XOR32ri, X86::XOR32mi },
    { X86::XOR32ri8, X86::XOR32mi8 },
    { X86::XOR32rr, X86::XOR32mr },
    { X86::XOR64ri32, X86::XOR64mi32 },
    { X86::XOR64ri8, X86::XOR64mi8 },
    { X86::XOR64rr, X86::XOR64mr },
    { X86::XOR8ri, X86::XOR8mi },
    { X86::XOR8rr, X86::XOR8mr }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
    unsigned RegOp = OpTbl2Addr[i][0];
    unsigned MemOp = OpTbl2Addr[i][1];
    if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp,
                                                     MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 0 | (1 << 4) | (1 << 5); // Index 0, folded load and store
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp,
                                                               AuxInfo))).second)
      AmbEntries.push_back(MemOp);
  }

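  // In the AuxInfo values built by these loops, the low four bits record the
  // operand index the memory reference is folded into, bit 4 records a folded
  // load, and bit 5 records a folded store; MemOp2RegOpTable consults this
  // when unfolding. (This reading is inferred from the table-building loops
  // in this constructor.)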
  // If the third value is 1, then it's folding either a load or a store.
  static const unsigned OpTbl0[][3] = {
    { X86::BT16ri8, X86::BT16mi8, 1 },
    { X86::BT32ri8, X86::BT32mi8, 1 },
    { X86::BT64ri8, X86::BT64mi8, 1 },
    { X86::CALL32r, X86::CALL32m, 1 },
    { X86::CALL64r, X86::CALL64m, 1 },
    { X86::CMP16ri, X86::CMP16mi, 1 },
    { X86::CMP16ri8, X86::CMP16mi8, 1 },
    { X86::CMP16rr, X86::CMP16mr, 1 },
    { X86::CMP32ri, X86::CMP32mi, 1 },
    { X86::CMP32ri8, X86::CMP32mi8, 1 },
    { X86::CMP32rr, X86::CMP32mr, 1 },
    { X86::CMP64ri32, X86::CMP64mi32, 1 },
    { X86::CMP64ri8, X86::CMP64mi8, 1 },
    { X86::CMP64rr, X86::CMP64mr, 1 },
    { X86::CMP8ri, X86::CMP8mi, 1 },
    { X86::CMP8rr, X86::CMP8mr, 1 },
    { X86::DIV16r, X86::DIV16m, 1 },
    { X86::DIV32r, X86::DIV32m, 1 },
    { X86::DIV64r, X86::DIV64m, 1 },
    { X86::DIV8r, X86::DIV8m, 1 },
    { X86::EXTRACTPSrr, X86::EXTRACTPSmr, 0 },
    { X86::FsMOVAPDrr, X86::MOVSDmr, 0 },
    { X86::FsMOVAPSrr, X86::MOVSSmr, 0 },
    { X86::IDIV16r, X86::IDIV16m, 1 },
    { X86::IDIV32r, X86::IDIV32m, 1 },
    { X86::IDIV64r, X86::IDIV64m, 1 },
    { X86::IDIV8r, X86::IDIV8m, 1 },
    { X86::IMUL16r, X86::IMUL16m, 1 },
    { X86::IMUL32r, X86::IMUL32m, 1 },
    { X86::IMUL64r, X86::IMUL64m, 1 },
    { X86::IMUL8r, X86::IMUL8m, 1 },
    { X86::JMP32r, X86::JMP32m, 1 },
    { X86::JMP64r, X86::JMP64m, 1 },
    { X86::MOV16ri, X86::MOV16mi, 0 },
    { X86::MOV16rr, X86::MOV16mr, 0 },
    { X86::MOV32ri, X86::MOV32mi, 0 },
    { X86::MOV32rr, X86::MOV32mr, 0 },
    { X86::MOV64ri32, X86::MOV64mi32, 0 },
    { X86::MOV64rr, X86::MOV64mr, 0 },
    { X86::MOV8ri, X86::MOV8mi, 0 },
    { X86::MOV8rr, X86::MOV8mr, 0 },
    { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, 0 },
    { X86::MOVAPDrr, X86::MOVAPDmr, 0 },
    { X86::MOVAPSrr, X86::MOVAPSmr, 0 },
    { X86::MOVDQArr, X86::MOVDQAmr, 0 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, 0 },
    { X86::MOVPQIto64rr, X86::MOVPQI2QImr, 0 },
    { X86::MOVPS2SSrr, X86::MOVPS2SSmr, 0 },
    { X86::MOVSDrr, X86::MOVSDmr, 0 },
    { X86::MOVSDto64rr, X86::MOVSDto64mr, 0 },
    { X86::MOVSS2DIrr, X86::MOVSS2DImr, 0 },
    { X86::MOVSSrr, X86::MOVSSmr, 0 },
    { X86::MOVUPDrr, X86::MOVUPDmr, 0 },
    { X86::MOVUPSrr, X86::MOVUPSmr, 0 },
    { X86::MUL16r, X86::MUL16m, 1 },
    { X86::MUL32r, X86::MUL32m, 1 },
    { X86::MUL64r, X86::MUL64m, 1 },
    { X86::MUL8r, X86::MUL8m, 1 },
    { X86::SETAEr, X86::SETAEm, 0 },
    { X86::SETAr, X86::SETAm, 0 },
    { X86::SETBEr, X86::SETBEm, 0 },
    { X86::SETBr, X86::SETBm, 0 },
    { X86::SETEr, X86::SETEm, 0 },
    { X86::SETGEr, X86::SETGEm, 0 },
    { X86::SETGr, X86::SETGm, 0 },
    { X86::SETLEr, X86::SETLEm, 0 },
    { X86::SETLr, X86::SETLm, 0 },
    { X86::SETNEr, X86::SETNEm, 0 },
    { X86::SETNOr, X86::SETNOm, 0 },
    { X86::SETNPr, X86::SETNPm, 0 },
    { X86::SETNSr, X86::SETNSm, 0 },
    { X86::SETOr, X86::SETOm, 0 },
    { X86::SETPr, X86::SETPm, 0 },
    { X86::SETSr, X86::SETSm, 0 },
    { X86::TAILJMPr, X86::TAILJMPm, 1 },
    { X86::TEST16ri, X86::TEST16mi, 1 },
    { X86::TEST32ri, X86::TEST32mi, 1 },
    { X86::TEST64ri32, X86::TEST64mi32, 1 },
    { X86::TEST8ri, X86::TEST8mi, 1 }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
    unsigned RegOp = OpTbl0[i][0];
    unsigned MemOp = OpTbl0[i][1];
    if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned FoldedLoad = OpTbl0[i][2];
    // Index 0, folded load or store.
    unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp, AuxInfo))).second)
        AmbEntries.push_back(MemOp);
  }

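  // OpTbl1 lists instructions that can fold a load into operand 1, e.g.
  // turning CMP32rr into CMP32rm when its second operand is reloaded from a
  // stack slot.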
  static const unsigned OpTbl1[][2] = {
    { X86::CMP16rr, X86::CMP16rm },
    { X86::CMP32rr, X86::CMP32rm },
    { X86::CMP64rr, X86::CMP64rm },
    { X86::CMP8rr, X86::CMP8rm },
    { X86::CVTSD2SSrr, X86::CVTSD2SSrm },
    { X86::CVTSI2SD64rr, X86::CVTSI2SD64rm },
    { X86::CVTSI2SDrr, X86::CVTSI2SDrm },
    { X86::CVTSI2SS64rr, X86::CVTSI2SS64rm },
    { X86::CVTSI2SSrr, X86::CVTSI2SSrm },
    { X86::CVTSS2SDrr, X86::CVTSS2SDrm },
    { X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm },
    { X86::CVTTSD2SIrr, X86::CVTTSD2SIrm },
    { X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm },
    { X86::CVTTSS2SIrr, X86::CVTTSS2SIrm },
    { X86::FsMOVAPDrr, X86::MOVSDrm },
    { X86::FsMOVAPSrr, X86::MOVSSrm },
    { X86::IMUL16rri, X86::IMUL16rmi },
    { X86::IMUL16rri8, X86::IMUL16rmi8 },
    { X86::IMUL32rri, X86::IMUL32rmi },
    { X86::IMUL32rri8, X86::IMUL32rmi8 },
    { X86::IMUL64rri32, X86::IMUL64rmi32 },
    { X86::IMUL64rri8, X86::IMUL64rmi8 },
    { X86::Int_CMPSDrr, X86::Int_CMPSDrm },
    { X86::Int_CMPSSrr, X86::Int_CMPSSrm },
    { X86::Int_COMISDrr, X86::Int_COMISDrm },
    { X86::Int_COMISSrr, X86::Int_COMISSrm },
    { X86::Int_CVTDQ2PDrr, X86::Int_CVTDQ2PDrm },
    { X86::Int_CVTDQ2PSrr, X86::Int_CVTDQ2PSrm },
    { X86::Int_CVTPD2DQrr, X86::Int_CVTPD2DQrm },
    { X86::Int_CVTPD2PSrr, X86::Int_CVTPD2PSrm },
    { X86::Int_CVTPS2DQrr, X86::Int_CVTPS2DQrm },
    { X86::Int_CVTPS2PDrr, X86::Int_CVTPS2PDrm },
    { X86::Int_CVTSD2SI64rr, X86::Int_CVTSD2SI64rm },
    { X86::Int_CVTSD2SIrr, X86::Int_CVTSD2SIrm },
    { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm },
    { X86::Int_CVTSI2SD64rr, X86::Int_CVTSI2SD64rm },
    { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm },
    { X86::Int_CVTSI2SS64rr, X86::Int_CVTSI2SS64rm },
    { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm },
    { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm },
    { X86::Int_CVTSS2SI64rr, X86::Int_CVTSS2SI64rm },
    { X86::Int_CVTSS2SIrr, X86::Int_CVTSS2SIrm },
    { X86::Int_CVTTPD2DQrr, X86::Int_CVTTPD2DQrm },
    { X86::Int_CVTTPS2DQrr, X86::Int_CVTTPS2DQrm },
    { X86::Int_CVTTSD2SI64rr, X86::Int_CVTTSD2SI64rm },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm },
    { X86::Int_CVTTSS2SI64rr, X86::Int_CVTTSS2SI64rm },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm },
    { X86::Int_UCOMISDrr, X86::Int_UCOMISDrm },
    { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm },
    { X86::MOV16rr, X86::MOV16rm },
    { X86::MOV32rr, X86::MOV32rm },
    { X86::MOV64rr, X86::MOV64rm },
    { X86::MOV64toPQIrr, X86::MOVQI2PQIrm },
    { X86::MOV64toSDrr, X86::MOV64toSDrm },
    { X86::MOV8rr, X86::MOV8rm },
    { X86::MOVAPDrr, X86::MOVAPDrm },
    { X86::MOVAPSrr, X86::MOVAPSrm },
    { X86::MOVDDUPrr, X86::MOVDDUPrm },
    { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm },
    { X86::MOVDI2SSrr, X86::MOVDI2SSrm },
    { X86::MOVDQArr, X86::MOVDQArm },
    { X86::MOVSD2PDrr, X86::MOVSD2PDrm },
    { X86::MOVSDrr, X86::MOVSDrm },
    { X86::MOVSHDUPrr, X86::MOVSHDUPrm },
    { X86::MOVSLDUPrr, X86::MOVSLDUPrm },
    { X86::MOVSS2PSrr, X86::MOVSS2PSrm },
    { X86::MOVSSrr, X86::MOVSSrm },
    { X86::MOVSX16rr8, X86::MOVSX16rm8 },
    { X86::MOVSX32rr16, X86::MOVSX32rm16 },
    { X86::MOVSX32rr8, X86::MOVSX32rm8 },
    { X86::MOVSX64rr16, X86::MOVSX64rm16 },
    { X86::MOVSX64rr32, X86::MOVSX64rm32 },
    { X86::MOVSX64rr8, X86::MOVSX64rm8 },
    { X86::MOVUPDrr, X86::MOVUPDrm },
    { X86::MOVUPSrr, X86::MOVUPSrm },
    { X86::MOVZDI2PDIrr, X86::MOVZDI2PDIrm },
    { X86::MOVZQI2PQIrr, X86::MOVZQI2PQIrm },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm },
    { X86::MOVZX16rr8, X86::MOVZX16rm8 },
    { X86::MOVZX32rr16, X86::MOVZX32rm16 },
    { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8 },
    { X86::MOVZX32rr8, X86::MOVZX32rm8 },
    { X86::MOVZX64rr16, X86::MOVZX64rm16 },
    { X86::MOVZX64rr32, X86::MOVZX64rm32 },
    { X86::MOVZX64rr8, X86::MOVZX64rm8 },
    { X86::PSHUFDri, X86::PSHUFDmi },
    { X86::PSHUFHWri, X86::PSHUFHWmi },
    { X86::PSHUFLWri, X86::PSHUFLWmi },
    { X86::RCPPSr, X86::RCPPSm },
    { X86::RCPPSr_Int, X86::RCPPSm_Int },
    { X86::RSQRTPSr, X86::RSQRTPSm },
    { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int },
    { X86::RSQRTSSr, X86::RSQRTSSm },
    { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int },
    { X86::SQRTPDr, X86::SQRTPDm },
    { X86::SQRTPDr_Int, X86::SQRTPDm_Int },
    { X86::SQRTPSr, X86::SQRTPSm },
    { X86::SQRTPSr_Int, X86::SQRTPSm_Int },
    { X86::SQRTSDr, X86::SQRTSDm },
    { X86::SQRTSDr_Int, X86::SQRTSDm_Int },
    { X86::SQRTSSr, X86::SQRTSSm },
    { X86::SQRTSSr_Int, X86::SQRTSSm_Int },
    { X86::TEST16rr, X86::TEST16rm },
    { X86::TEST32rr, X86::TEST32rm },
    { X86::TEST64rr, X86::TEST64rm },
    { X86::TEST8rr, X86::TEST8rm },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr, X86::UCOMISDrm },
    { X86::UCOMISSrr, X86::UCOMISSrm }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
    unsigned RegOp = OpTbl1[i][0];
    unsigned MemOp = OpTbl1[i][1];
    if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 1 | (1 << 4); // Index 1, folded load
    if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
      if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                  std::make_pair(RegOp, AuxInfo))).second)
        AmbEntries.push_back(MemOp);
  }

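  // OpTbl2 lists two-address instructions that can fold a load into operand
  // 2, the second source, e.g. turning ADD32rr into ADD32rm.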
  static const unsigned OpTbl2[][2] = {
    { X86::ADC32rr, X86::ADC32rm },
    { X86::ADC64rr, X86::ADC64rm },
    { X86::ADD16rr, X86::ADD16rm },
    { X86::ADD32rr, X86::ADD32rm },
    { X86::ADD64rr, X86::ADD64rm },
    { X86::ADD8rr, X86::ADD8rm },
    { X86::ADDPDrr, X86::ADDPDrm },
    { X86::ADDPSrr, X86::ADDPSrm },
    { X86::ADDSDrr, X86::ADDSDrm },
    { X86::ADDSSrr, X86::ADDSSrm },
    { X86::ADDSUBPDrr, X86::ADDSUBPDrm },
    { X86::ADDSUBPSrr, X86::ADDSUBPSrm },
    { X86::AND16rr, X86::AND16rm },
    { X86::AND32rr, X86::AND32rm },
    { X86::AND64rr, X86::AND64rm },
    { X86::AND8rr, X86::AND8rm },
    { X86::ANDNPDrr, X86::ANDNPDrm },
    { X86::ANDNPSrr, X86::ANDNPSrm },
    { X86::ANDPDrr, X86::ANDPDrm },
    { X86::ANDPSrr, X86::ANDPSrm },
    { X86::CMOVA16rr, X86::CMOVA16rm },
    { X86::CMOVA32rr, X86::CMOVA32rm },
    { X86::CMOVA64rr, X86::CMOVA64rm },
    { X86::CMOVAE16rr, X86::CMOVAE16rm },
    { X86::CMOVAE32rr, X86::CMOVAE32rm },
    { X86::CMOVAE64rr, X86::CMOVAE64rm },
    { X86::CMOVB16rr, X86::CMOVB16rm },
    { X86::CMOVB32rr, X86::CMOVB32rm },
    { X86::CMOVB64rr, X86::CMOVB64rm },
    { X86::CMOVBE16rr, X86::CMOVBE16rm },
    { X86::CMOVBE32rr, X86::CMOVBE32rm },
    { X86::CMOVBE64rr, X86::CMOVBE64rm },
    { X86::CMOVE16rr, X86::CMOVE16rm },
    { X86::CMOVE32rr, X86::CMOVE32rm },
    { X86::CMOVE64rr, X86::CMOVE64rm },
    { X86::CMOVG16rr, X86::CMOVG16rm },
    { X86::CMOVG32rr, X86::CMOVG32rm },
    { X86::CMOVG64rr, X86::CMOVG64rm },
    { X86::CMOVGE16rr, X86::CMOVGE16rm },
    { X86::CMOVGE32rr, X86::CMOVGE32rm },
    { X86::CMOVGE64rr, X86::CMOVGE64rm },
    { X86::CMOVL16rr, X86::CMOVL16rm },
    { X86::CMOVL32rr, X86::CMOVL32rm },
    { X86::CMOVL64rr, X86::CMOVL64rm },
    { X86::CMOVLE16rr, X86::CMOVLE16rm },
    { X86::CMOVLE32rr, X86::CMOVLE32rm },
    { X86::CMOVLE64rr, X86::CMOVLE64rm },
    { X86::CMOVNE16rr, X86::CMOVNE16rm },
    { X86::CMOVNE32rr, X86::CMOVNE32rm },
    { X86::CMOVNE64rr, X86::CMOVNE64rm },
    { X86::CMOVNO16rr, X86::CMOVNO16rm },
    { X86::CMOVNO32rr, X86::CMOVNO32rm },
    { X86::CMOVNO64rr, X86::CMOVNO64rm },
    { X86::CMOVNP16rr, X86::CMOVNP16rm },
    { X86::CMOVNP32rr, X86::CMOVNP32rm },
    { X86::CMOVNP64rr, X86::CMOVNP64rm },
    { X86::CMOVNS16rr, X86::CMOVNS16rm },
    { X86::CMOVNS32rr, X86::CMOVNS32rm },
    { X86::CMOVNS64rr, X86::CMOVNS64rm },
    { X86::CMOVO16rr, X86::CMOVO16rm },
    { X86::CMOVO32rr, X86::CMOVO32rm },
    { X86::CMOVO64rr, X86::CMOVO64rm },
    { X86::CMOVP16rr, X86::CMOVP16rm },
    { X86::CMOVP32rr, X86::CMOVP32rm },
    { X86::CMOVP64rr, X86::CMOVP64rm },
    { X86::CMOVS16rr, X86::CMOVS16rm },
    { X86::CMOVS32rr, X86::CMOVS32rm },
    { X86::CMOVS64rr, X86::CMOVS64rm },
    { X86::CMPPDrri, X86::CMPPDrmi },
    { X86::CMPPSrri, X86::CMPPSrmi },
    { X86::CMPSDrr, X86::CMPSDrm },
    { X86::CMPSSrr, X86::CMPSSrm },
    { X86::DIVPDrr, X86::DIVPDrm },
    { X86::DIVPSrr, X86::DIVPSrm },
    { X86::DIVSDrr, X86::DIVSDrm },
    { X86::DIVSSrr, X86::DIVSSrm },
    { X86::FsANDNPDrr, X86::FsANDNPDrm },
    { X86::FsANDNPSrr, X86::FsANDNPSrm },
    { X86::FsANDPDrr, X86::FsANDPDrm },
    { X86::FsANDPSrr, X86::FsANDPSrm },
    { X86::FsORPDrr, X86::FsORPDrm },
    { X86::FsORPSrr, X86::FsORPSrm },
    { X86::FsXORPDrr, X86::FsXORPDrm },
    { X86::FsXORPSrr, X86::FsXORPSrm },
    { X86::HADDPDrr, X86::HADDPDrm },
    { X86::HADDPSrr, X86::HADDPSrm },
    { X86::HSUBPDrr, X86::HSUBPDrm },
    { X86::HSUBPSrr, X86::HSUBPSrm },
    { X86::IMUL16rr, X86::IMUL16rm },
    { X86::IMUL32rr, X86::IMUL32rm },
    { X86::IMUL64rr, X86::IMUL64rm },
    { X86::MAXPDrr, X86::MAXPDrm },
    { X86::MAXPDrr_Int, X86::MAXPDrm_Int },
    { X86::MAXPSrr, X86::MAXPSrm },
    { X86::MAXPSrr_Int, X86::MAXPSrm_Int },
    { X86::MAXSDrr, X86::MAXSDrm },
    { X86::MAXSDrr_Int, X86::MAXSDrm_Int },
    { X86::MAXSSrr, X86::MAXSSrm },
    { X86::MAXSSrr_Int, X86::MAXSSrm_Int },
    { X86::MINPDrr, X86::MINPDrm },
    { X86::MINPDrr_Int, X86::MINPDrm_Int },
    { X86::MINPSrr, X86::MINPSrm },
    { X86::MINPSrr_Int, X86::MINPSrm_Int },
    { X86::MINSDrr, X86::MINSDrm },
    { X86::MINSDrr_Int, X86::MINSDrm_Int },
    { X86::MINSSrr, X86::MINSSrm },
    { X86::MINSSrr_Int, X86::MINSSrm_Int },
    { X86::MULPDrr, X86::MULPDrm },
    { X86::MULPSrr, X86::MULPSrm },
    { X86::MULSDrr, X86::MULSDrm },
    { X86::MULSSrr, X86::MULSSrm },
    { X86::OR16rr, X86::OR16rm },
    { X86::OR32rr, X86::OR32rm },
    { X86::OR64rr, X86::OR64rm },
    { X86::OR8rr, X86::OR8rm },
    { X86::ORPDrr, X86::ORPDrm },
    { X86::ORPSrr, X86::ORPSrm },
    { X86::PACKSSDWrr, X86::PACKSSDWrm },
    { X86::PACKSSWBrr, X86::PACKSSWBrm },
    { X86::PACKUSWBrr, X86::PACKUSWBrm },
    { X86::PADDBrr, X86::PADDBrm },
    { X86::PADDDrr, X86::PADDDrm },
    { X86::PADDQrr, X86::PADDQrm },
    { X86::PADDSBrr, X86::PADDSBrm },
    { X86::PADDSWrr, X86::PADDSWrm },
    { X86::PADDWrr, X86::PADDWrm },
    { X86::PANDNrr, X86::PANDNrm },
    { X86::PANDrr, X86::PANDrm },
    { X86::PAVGBrr, X86::PAVGBrm },
    { X86::PAVGWrr, X86::PAVGWrm },
    { X86::PCMPEQBrr, X86::PCMPEQBrm },
    { X86::PCMPEQDrr, X86::PCMPEQDrm },
    { X86::PCMPEQWrr, X86::PCMPEQWrm },
    { X86::PCMPGTBrr, X86::PCMPGTBrm },
    { X86::PCMPGTDrr, X86::PCMPGTDrm },
    { X86::PCMPGTWrr, X86::PCMPGTWrm },
    { X86::PINSRWrri, X86::PINSRWrmi },
    { X86::PMADDWDrr, X86::PMADDWDrm },
    { X86::PMAXSWrr, X86::PMAXSWrm },
    { X86::PMAXUBrr, X86::PMAXUBrm },
    { X86::PMINSWrr, X86::PMINSWrm },
    { X86::PMINUBrr, X86::PMINUBrm },
    { X86::PMULDQrr, X86::PMULDQrm },
    { X86::PMULHUWrr, X86::PMULHUWrm },
    { X86::PMULHWrr, X86::PMULHWrm },
    { X86::PMULLDrr, X86::PMULLDrm },
    { X86::PMULLDrr_int, X86::PMULLDrm_int },
    { X86::PMULLWrr, X86::PMULLWrm },
    { X86::PMULUDQrr, X86::PMULUDQrm },
    { X86::PORrr, X86::PORrm },
    { X86::PSADBWrr, X86::PSADBWrm },
    { X86::PSLLDrr, X86::PSLLDrm },
    { X86::PSLLQrr, X86::PSLLQrm },
    { X86::PSLLWrr, X86::PSLLWrm },
    { X86::PSRADrr, X86::PSRADrm },
    { X86::PSRAWrr, X86::PSRAWrm },
    { X86::PSRLDrr, X86::PSRLDrm },
    { X86::PSRLQrr, X86::PSRLQrm },
    { X86::PSRLWrr, X86::PSRLWrm },
    { X86::PSUBBrr, X86::PSUBBrm },
    { X86::PSUBDrr, X86::PSUBDrm },
    { X86::PSUBSBrr, X86::PSUBSBrm },
    { X86::PSUBSWrr, X86::PSUBSWrm },
    { X86::PSUBWrr, X86::PSUBWrm },
    { X86::PUNPCKHBWrr, X86::PUNPCKHBWrm },
    { X86::PUNPCKHDQrr, X86::PUNPCKHDQrm },
    { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm },
    { X86::PUNPCKHWDrr, X86::PUNPCKHWDrm },
    { X86::PUNPCKLBWrr, X86::PUNPCKLBWrm },
    { X86::PUNPCKLDQrr, X86::PUNPCKLDQrm },
    { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm },
    { X86::PUNPCKLWDrr, X86::PUNPCKLWDrm },
    { X86::PXORrr, X86::PXORrm },
    { X86::SBB32rr, X86::SBB32rm },
    { X86::SBB64rr, X86::SBB64rm },
    { X86::SHUFPDrri, X86::SHUFPDrmi },
    { X86::SHUFPSrri, X86::SHUFPSrmi },
    { X86::SUB16rr, X86::SUB16rm },
    { X86::SUB32rr, X86::SUB32rm },
    { X86::SUB64rr, X86::SUB64rm },
    { X86::SUB8rr, X86::SUB8rm },
    { X86::SUBPDrr, X86::SUBPDrm },
    { X86::SUBPSrr, X86::SUBPSrm },
    { X86::SUBSDrr, X86::SUBSDrm },
    { X86::SUBSSrr, X86::SUBSSrm },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr, X86::UNPCKHPDrm },
    { X86::UNPCKHPSrr, X86::UNPCKHPSrm },
    { X86::UNPCKLPDrr, X86::UNPCKLPDrm },
    { X86::UNPCKLPSrr, X86::UNPCKLPSrm },
    { X86::XOR16rr, X86::XOR16rm },
    { X86::XOR32rr, X86::XOR32rm },
    { X86::XOR64rr, X86::XOR64rm },
    { X86::XOR8rr, X86::XOR8rm },
    { X86::XORPDrr, X86::XORPDrm },
    { X86::XORPSrr, X86::XORPSrm }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
    unsigned RegOp = OpTbl2[i][0];
    unsigned MemOp = OpTbl2[i][1];
    if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp,
                                                 MemOp)).second)
      assert(false && "Duplicated entries?");
    unsigned AuxInfo = 2 | (1 << 4); // Index 2, folded load
    if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
                                                std::make_pair(RegOp, AuxInfo))).second)
      AmbEntries.push_back(MemOp);
  }

  // Verify that no ambiguous (duplicated) entries made it into the unfolding
  // map: each memory-form opcode must map back to a unique register form.
  assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}

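/// isMoveInstr - Return true if MI is a register-to-register move and, if so,
/// report the source and destination registers along with their sub-register
/// indices.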
bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned &SrcReg, unsigned &DstReg,
                               unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case X86::MOV8rr:
  case X86::MOV8rr_NOREX:
  case X86::MOV16rr:
  case X86::MOV32rr:
  case X86::MOV64rr:
  case X86::MOVSSrr:
  case X86::MOVSDrr:

  // FP Stack register class copies
  case X86::MOV_Fp3232: case X86::MOV_Fp6464: case X86::MOV_Fp8080:
  case X86::MOV_Fp3264: case X86::MOV_Fp3280:
  case X86::MOV_Fp6432: case X86::MOV_Fp8032:

  case X86::FsMOVAPSrr:
  case X86::FsMOVAPDrr:
  case X86::MOVAPSrr:
  case X86::MOVAPDrr:
  case X86::MOVDQArr:
  case X86::MOVSS2PSrr:
  case X86::MOVSD2PDrr:
  case X86::MOVPS2SSrr:
  case X86::MOVPD2SDrr:
  case X86::MMX_MOVQ64rr:
    assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "invalid register-register move instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SrcSubIdx = MI.getOperand(1).getSubReg();
    DstSubIdx = MI.getOperand(0).getSubReg();
    return true;
  }
}

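/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the register it loads into and set
/// FrameIndex to the loaded slot; return 0 for anything else.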
unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(4).isImm() &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

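/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the register it stores and set FrameIndex
/// to the target slot; return 0 for anything else.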
unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV32mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MOVDQAmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
        MI->getOperand(2).isReg() && MI->getOperand(3).isImm() &&
        MI->getOperand(1).getImm() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(X86AddrNumOperands).getReg();
    }
    break;
  }
  return 0;
}

/// regIsPICBase - Return true if register is a PIC base (i.e., defined by
/// X86::MOVPC32r).
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  bool isPICBase = false;
  for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
         E = MRI.def_end(); I != E; ++I) {
    MachineInstr *DefMI = I.getOperand().getParent();
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

/// isGVStub - Return true if the GV requires an extra load to get the
/// real address.
static inline bool isGVStub(GlobalValue *GV, X86TargetMachine &TM) {
  return TM.getSubtarget<X86Subtarget>().GVRequiresExtraLoad(GV, TM, false);
}

/// CanRematLoadWithDispOperand - Return true if a load with the specified
/// operand is a candidate for remat: for this to be true we need to know that
/// the load will always return the same value, even if moved.
static bool CanRematLoadWithDispOperand(const MachineOperand &MO,
                                        X86TargetMachine &TM) {
  // Loads from constant pool entries can be remat'd.
  if (MO.isCPI()) return true;

  // We can remat globals in some cases.
  if (MO.isGlobal()) {
    // If this is a load of a stub, not of the global, we can remat it. This
    // access will always return the address of the global.
    if (isGVStub(MO.getGlobal(), TM))
      return true;

    // If the global itself is constant, we can remat the load.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(MO.getGlobal()))
      if (GV->isConstant())
        return true;
  }
  return false;
}

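/// isReallyTriviallyReMaterializable - Decide whether this particular load or
/// LEA can be freely re-executed at a new point in the program: its address
/// must be provably invariant (constant pool, rematerializable global stub,
/// or RIP/PIC-base relative). Any other instruction carrying the
/// rematerializable flag is accepted unconditionally.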
bool
X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI->getOperand(1).isReg() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        CanRematLoadWithDispOperand(MI->getOperand(4), TM)) {
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
      if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
        return false;
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      bool isPICBase = false;
      for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
             E = MRI.def_end(); I != E; ++I) {
        MachineInstr *DefMI = I.getOperand().getParent();
        if (DefMI->getOpcode() != X86::MOVPC32r)
          return false;
        assert(!isPICBase && "More than one PIC base?");
        isPICBase = true;
      }
      return isPICBase;
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        !MI->getOperand(4).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI->getOperand(1).isReg())
        return true;
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
/// that would clobber the EFLAGS condition register. Note the result may be
/// conservative. If it cannot definitely determine the safety after visiting
/// two instructions it assumes it's not safe.
static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  // It's always safe to clobber EFLAGS at the end of a block.
  if (I == MBB.end())
    return true;

  // For compile time consideration, if we are not able to determine the
  // safety after visiting 2 instructions, we will assume it's not safe.
  for (unsigned i = 0; i < 2; ++i) {
    bool SeenDef = false;
    for (unsigned j = 0, e = I->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = I->getOperand(j);
      if (!MO.isReg())
        continue;
      if (MO.getReg() == X86::EFLAGS) {
        if (MO.isUse())
          return false;
        SeenDef = true;
      }
    }

    if (SeenDef)
      // This instruction defines EFLAGS, no need to look any further.
      return true;
    ++I;

    // If we make it to the end of the block, it's safe to clobber EFLAGS.
    if (I == MBB.end())
      return true;
  }

  // Conservative answer.
  return false;
}

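/// reMaterialize - Re-issue the specified 'original' instruction at the
/// specific location, targeting a new destination register.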
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg,
                                 const MachineInstr *Orig) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  unsigned SubIdx = Orig->getOperand(0).isReg()
    ? Orig->getOperand(0).getSubReg() : 0;
  bool ChangeSubIdx = SubIdx != 0;
  if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
    DestReg = RI.getSubReg(DestReg, SubIdx);
    SubIdx = 0;
  }

  // MOV32r0 etc. are implemented with xor which clobbers condition code.
  // Re-materialize them as movri instructions to avoid side effects.
  bool Emitted = false;
  switch (Orig->getOpcode()) {
  default: break;
  case X86::MOV8r0:
  case X86::MOV16r0:
  case X86::MOV32r0:
  case X86::MOV64r0: {
    if (!isSafeToClobberEFLAGS(MBB, I)) {
      unsigned Opc = 0;
      switch (Orig->getOpcode()) {
      default: break;
      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
      case X86::MOV16r0: Opc = X86::MOV16ri; break;
      case X86::MOV32r0: Opc = X86::MOV32ri; break;
      case X86::MOV64r0: Opc = X86::MOV64ri32; break;
      }
      BuildMI(MBB, I, DL, get(Opc), DestReg).addImm(0);
      Emitted = true;
    }
    break;
  }
  }

  if (!Emitted) {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->getOperand(0).setReg(DestReg);
    MBB.insert(I, MI);
  }

  if (ChangeSubIdx) {
    MachineInstr *NewMI = prior(I);
    NewMI->getOperand(0).setSubReg(SubIdx);
  }
}

/// isInvariantLoad - Return true if the specified instruction (which is marked
/// mayLoad) is loading from a location whose value is invariant across the
/// function. For example, loading a value from the constant pool or from
/// the argument area of a function if it does not change. This should
/// only return true if *all* loads the instruction does are invariant (if it
/// does multiple loads).
bool X86InstrInfo::isInvariantLoad(const MachineInstr *MI) const {
  // This code cares about loads from three cases: constant pool entries,
  // invariant argument slots, and global stubs. In order to handle these cases
  // for all of the myriad of X86 instructions, we just scan for a CP/FI/GV
  // operand and base our analysis on it. This is safe because the address of
  // none of these three cases is ever used as anything other than a load base
  // and X86 doesn't have any instructions that load from multiple places.

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    // Loads from constant pools are trivially invariant.
    if (MO.isCPI())
      return true;

    if (MO.isGlobal())
      return isGVStub(MO.getGlobal(), TM);

    // If this is a load from an invariant stack slot, the load is a constant.
    if (MO.isFI()) {
      const MachineFrameInfo &MFI =
        *MI->getParent()->getParent()->getFrameInfo();
      int Idx = MO.getIndex();
      return MFI.isFixedObjectIndex(Idx) && MFI.isImmutableObjectIndex(Idx);
    }
  }

  // All other instances of these instructions are presumed to have other
  // issues.
  return false;
}

/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  // All instructions input are two-addr instructions. Get the known operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  bool isDead = MI->getOperand(0).isDead();
  bool isKill = MI->getOperand(1).isKill();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  unsigned MIOpc = MI->getOpcode();
  switch (MIOpc) {
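  // A SHUFPS whose two sources are the same register shuffles a single
  // vector, so it can be rewritten as PSHUFD (SSE2), which is not tied to
  // its source and therefore gives the allocator a free destination
  // register.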
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    if (B != C) return 0;
    unsigned A = MI->getOperand(0).getReg();
    unsigned M = MI->getOperand(3).getImm();
    NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
      .addReg(A, RegState::Define | getDeadRegState(isDead))
      .addReg(B, getKillRegState(isKill)).addImm(M);
    break;
  }
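  // For the shift-by-immediate cases below, a small left shift can instead
  // be emitted as an LEA with a scaled index, e.g. "shll $2, %eax" becomes
  // "leal (,%eax,4), %eax"; unlike the shift, the LEA may also place its
  // result in a different register.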
case X86::SHL64ri: {
|
2007-09-14 21:48:26 +00:00
|
|
|
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
|
2007-03-28 18:12:31 +00:00
|
|
|
// NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
|
|
|
|
// the flags produced by a shift yet, so this is safe.
|
|
|
|
unsigned ShAmt = MI->getOperand(2).getImm();
|
|
|
|
if (ShAmt == 0 || ShAmt >= 4) return 0;
|
2008-07-03 09:09:37 +00:00
|
|
|
|
2009-02-11 21:51:19 +00:00
|
|
|
NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
|
2009-05-13 21:33:08 +00:00
|
|
|
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
|
|
|
|
.addReg(0).addImm(1 << ShAmt)
|
|
|
|
.addReg(Src, getKillRegState(isKill))
|
|
|
|
.addImm(0);
|
2007-03-28 18:12:31 +00:00
|
|
|
break;
|
|
|
|
}
|
Two changes:
1) codegen a shift of a register as a shift, not an LEA.
2) teach the RA to convert a shift to an LEA instruction if it wants something
in three-address form.
This gives us asm diffs like:
- leal (,%eax,4), %eax
+ shll $2, %eax
which is faster on some processors and smaller on all of them.
and, more interestingly:
- movl 24(%esi), %eax
- leal (,%eax,4), %edi
+ movl 24(%esi), %edi
+ shll $2, %edi
Without #2, #1 was a significant pessimization in some cases.
This implements CodeGen/X86/shift-codegen.ll
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@35204 91177308-0d34-0410-b5e6-96231b3b80d8
2007-03-20 06:08:29 +00:00
|
|
|
  case X86::SHL32ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
      .addReg(Dest, RegState::Define | getDeadRegState(isDead))
      .addReg(0).addImm(1 << ShAmt)
      .addReg(Src, getKillRegState(isKill)).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    if (DisableLEA16) {
      // If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
      MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
      unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
        ? X86::LEA64_32r : X86::LEA32r;
      unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);

      // Build and insert into an implicit UNDEF value. This is OK because
      // we'll be shifting and then extracting the lower 16-bits.
      BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
      MachineInstr *InsMI =
        BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg)
        .addReg(leaInReg)
        .addReg(Src, getKillRegState(isKill))
        .addImm(X86::SUBREG_16BIT);

      NewMI = BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(Opc), leaOutReg)
        .addReg(0).addImm(1 << ShAmt)
        .addReg(leaInReg, RegState::Kill)
        .addImm(0);

      MachineInstr *ExtMI =
        BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::EXTRACT_SUBREG))
        .addReg(Dest, RegState::Define | getDeadRegState(isDead))
        .addReg(leaOutReg, RegState::Kill)
        .addImm(X86::SUBREG_16BIT);

      if (LV) {
        // Update live variables
        LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
        LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
        if (isKill)
          LV->replaceKillInstruction(Src, MI, InsMI);
        if (isDead)
          LV->replaceKillInstruction(Dest, MI, ExtMI);
      }
      return ExtMI;
    } else {
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
        .addReg(Dest, RegState::Define | getDeadRegState(isDead))
        .addReg(0).addImm(1 << ShAmt)
        .addReg(Src, getKillRegState(isKill))
        .addImm(0);
    }
    break;
  }
  default: {
    // The following opcodes also set the condition code register(s). Only
    // convert them to an equivalent LEA if the condition code register defs
    // are dead!
    if (hasLiveCondCodeDef(MI))
      return 0;

    bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
    switch (MIOpc) {
    default: return 0;
    case X86::INC64r:
    case X86::INC32r:
    case X86::INC64_32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                              .addReg(Dest, RegState::Define |
                                      getDeadRegState(isDead)),
                              Src, isKill, 1);
      break;
    }
    case X86::INC16r:
    case X86::INC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, 1);
      break;
    case X86::DEC64r:
    case X86::DEC32r:
    case X86::DEC64_32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                              .addReg(Dest, RegState::Define |
                                      getDeadRegState(isDead)),
                              Src, isKill, -1);
      break;
    }
    case X86::DEC16r:
    case X86::DEC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                           .addReg(Dest, RegState::Define |
                                   getDeadRegState(isDead)),
                           Src, isKill, -1);
      break;
    case X86::ADD64rr:
    case X86::ADD32rr: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      unsigned Src2 = MI->getOperand(2).getReg();
      bool isKill2 = MI->getOperand(2).isKill();
      NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                        .addReg(Dest, RegState::Define |
                                getDeadRegState(isDead)),
                        Src, isKill, Src2, isKill2);
      if (LV && isKill2)
        LV->replaceKillInstruction(Src2, MI, NewMI);
      break;
    }
    case X86::ADD16rr: {
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Src2 = MI->getOperand(2).getReg();
      bool isKill2 = MI->getOperand(2).isKill();
      NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                        .addReg(Dest, RegState::Define |
                                getDeadRegState(isDead)),
                        Src, isKill, Src2, isKill2);
      if (LV && isKill2)
        LV->replaceKillInstruction(Src2, MI, NewMI);
      break;
    }
    case X86::ADD64ri32:
    case X86::ADD64ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImm())
        NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
                                .addReg(Dest, RegState::Define |
                                        getDeadRegState(isDead)),
                                Src, isKill, MI->getOperand(2).getImm());
      break;
    case X86::ADD32ri:
    case X86::ADD32ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImm()) {
        unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
        NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                                .addReg(Dest, RegState::Define |
                                        getDeadRegState(isDead)),
                                Src, isKill, MI->getOperand(2).getImm());
      }
      break;
    case X86::ADD16ri:
    case X86::ADD16ri8:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImm())
        NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
                             .addReg(Dest, RegState::Define |
                                     getDeadRegState(isDead)),
                             Src, isKill, MI->getOperand(2).getImm());
      break;
    case X86::SHL16ri:
      if (DisableLEA16) return 0;
    case X86::SHL32ri:
    case X86::SHL64ri: {
      assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImm() &&
             "Unknown shl instruction!");
      unsigned ShAmt = MI->getOperand(2).getImm();
      if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
        X86AddressMode AM;
        AM.Scale = 1 << ShAmt;
        AM.IndexReg = Src;
        unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
          : (MIOpc == X86::SHL32ri
             ? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
        NewMI = addFullAddress(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                               .addReg(Dest, RegState::Define |
                                       getDeadRegState(isDead)), AM);
        if (isKill)
          NewMI->getOperand(3).setIsKill(true);
      }
      break;
    }
    }
  }
  }
  if (!NewMI) return 0;

  if (LV) {  // Update live variables
    if (isKill)
      LV->replaceKillInstruction(Src, MI, NewMI);
    if (isDead)
      LV->replaceKillInstruction(Dest, MI, NewMI);
  }

  MFI->insert(MBBI, NewMI);          // Insert the new inst
  return NewMI;
}
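
// Illustrative sketch (not part of the original file): a caller such as the
// two-address instruction pass would typically invoke this hook roughly as
// follows, where 'TII', 'MFI', 'MBBI', and 'LV' are assumed to be provided
// by the pass:
//
//   if (MachineInstr *New3Addr = TII->convertToThreeAddress(MFI, MBBI, LV)) {
//     // The replacement has already been inserted before MBBI above, so
//     // the original two-address instruction can then be removed.
//   }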

/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *
X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
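  // For SHLD/SHRD with an immediate shift amount, commuting the two source
  // operands also requires flipping the opcode (SHLD <-> SHRD) and replacing
  // the amount I with Size-I: e.g. a 32-bit SHLD by 2 becomes an SHRD by 30.
  // This lets the register allocator avoid an extra copy when commuting is
  // profitable.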
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImm();
    if (NewMI) {
      MachineFunction &MF = *MI->getParent()->getParent();
      MI = MF.CloneMachineInstr(MI);
      NewMI = false;
    }
    MI->setDesc(get(Opc));
    MI->getOperand(3).setImm(Size-Amt);
    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
  }
  case X86::CMOVB16rr:
  case X86::CMOVB32rr:
  case X86::CMOVB64rr:
  case X86::CMOVAE16rr:
  case X86::CMOVAE32rr:
  case X86::CMOVAE64rr:
  case X86::CMOVE16rr:
  case X86::CMOVE32rr:
  case X86::CMOVE64rr:
  case X86::CMOVNE16rr:
  case X86::CMOVNE32rr:
  case X86::CMOVNE64rr:
  case X86::CMOVBE16rr:
  case X86::CMOVBE32rr:
  case X86::CMOVBE64rr:
  case X86::CMOVA16rr:
  case X86::CMOVA32rr:
  case X86::CMOVA64rr:
  case X86::CMOVL16rr:
  case X86::CMOVL32rr:
  case X86::CMOVL64rr:
  case X86::CMOVGE16rr:
  case X86::CMOVGE32rr:
  case X86::CMOVGE64rr:
  case X86::CMOVLE16rr:
  case X86::CMOVLE32rr:
  case X86::CMOVLE64rr:
  case X86::CMOVG16rr:
  case X86::CMOVG32rr:
  case X86::CMOVG64rr:
  case X86::CMOVS16rr:
  case X86::CMOVS32rr:
  case X86::CMOVS64rr:
  case X86::CMOVNS16rr:
  case X86::CMOVNS32rr:
  case X86::CMOVNS64rr:
  case X86::CMOVP16rr:
  case X86::CMOVP32rr:
  case X86::CMOVP64rr:
  case X86::CMOVNP16rr:
  case X86::CMOVNP32rr:
  case X86::CMOVNP64rr:
  case X86::CMOVO16rr:
  case X86::CMOVO32rr:
  case X86::CMOVO64rr:
  case X86::CMOVNO16rr:
  case X86::CMOVNO32rr:
  case X86::CMOVNO64rr: {
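    // Commuting a CMOV swaps its two source operands, so the condition must
    // be inverted to preserve the result; the operand swap itself is done by
    // the default path after the fallthrough below.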
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    case X86::CMOVO16rr: Opc = X86::CMOVNO16rr; break;
    case X86::CMOVO32rr: Opc = X86::CMOVNO32rr; break;
    case X86::CMOVO64rr: Opc = X86::CMOVNO64rr; break;
    case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break;
    case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
    case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
    }
    if (NewMI) {
      MachineFunction &MF = *MI->getParent()->getParent();
      MI = MF.CloneMachineInstr(MI);
      NewMI = false;
    }
    MI->setDesc(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
  }
}
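
// Illustrative sketch (not part of the original file): a pass holding a
// TargetInstrInfo pointer 'TII' would request a commute roughly like this;
// passing NewMI=false asks for an in-place rewrite where possible:
//
//   if (MachineInstr *CommutedMI = TII->commuteInstruction(MI, false)) {
//     // On success the result is MI itself, rewritten in place.
//   }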

static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE: return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL: return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG: return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB: return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA: return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS: return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP: return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO: return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}
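
/// GetCondBranchFromCond - Return the branch opcode corresponding to the
/// given condition code, e.g. COND_E -> JE.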
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L: return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G: return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B: return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A: return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S: return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P: return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O: return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L: return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G: return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B: return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A: return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S: return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P: return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O: return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isTerminator()) return false;

  // Conditional branch is a special case.
  if (TID.isBranch() && !TID.isBarrier())
    return true;
  if (!TID.isPredicable())
    return true;
  return !isPredicated(MI);
}

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
                                               const X86InstrInfo &TII) {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  return TII.isUnpredicatedTerminator(MI);
}
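
/// AnalyzeBranch - Analyze the terminators of MBB from the bottom up,
/// filling in TBB, FBB, and Cond when the control flow is understood;
/// returns true if the branch structure cannot be analyzed.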
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    // Working from the bottom, when we see a non-terminator
    // instruction, we're done.
    if (!isBrAnalysisUnpredicatedTerminator(I, *this))
      break;
    // A terminator that isn't a branch can't easily be handled
    // by this analysis.
    if (!I->getDesc().isBranch())
      return true;
    // Handle unconditional branches.
    if (I->getOpcode() == X86::JMP) {
      if (!AllowModify) {
        TBB = I->getOperand(0).getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (next(I) != MBB.end())
        next(I)->eraseFromParent();
      Cond.clear();
      FBB = 0;
      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }
      // TBB is used to indicate the unconditional destination.
      TBB = I->getOperand(0).getMBB();
      continue;
    }
    // Handle conditional branches.
    X86::CondCode BranchCode = GetCondFromBranchOpc(I->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.
    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(BranchCode));
      continue;
    }
    // Handle subsequent conditional branches. Only handle the case
    // where all conditional branches branch to the same destination
    // and their condition opcodes fit one of the special
    // multi-branch idioms.
    assert(Cond.size() == 1);
    assert(TBB);
    // Only handle the case where all conditional branches branch to
    // the same destination.
    if (TBB != I->getOperand(0).getMBB())
      return true;
    X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
    // If the conditions are the same, we can leave them alone.
    if (OldBranchCode == BranchCode)
      continue;
    // If they differ, see if they fit one of the known patterns.
    // Theoretically we could handle more patterns here, but
    // we shouldn't expect to see them if instruction selection
    // has done a reasonable job.
    if ((OldBranchCode == X86::COND_NP &&
         BranchCode == X86::COND_E) ||
        (OldBranchCode == X86::COND_E &&
         BranchCode == X86::COND_NP))
      BranchCode = X86::COND_NP_OR_E;
    else if ((OldBranchCode == X86::COND_P &&
              BranchCode == X86::COND_NE) ||
             (OldBranchCode == X86::COND_NE &&
              BranchCode == X86::COND_P))
      BranchCode = X86::COND_NE_OR_P;
    else
      return true;
    // Update the MachineOperand.
    Cond[0].setImm(BranchCode);
  }

  return false;
}
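
// Note: a block can legitimately end in more than one conditional branch
// because lowered FCMP_OEQ/FCMP_UNE comparisons are emitted as two branches
// (e.g. "jne" followed by "jp" after a ucomisd) instead of materializing the
// parity flag with setcc; the COND_NP_OR_E and COND_NE_OR_P pseudo conditions
// recognized above describe exactly those idioms.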

unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->getOpcode() != X86::JMP &&
        GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}
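
/// InsertBranch - Insert branch code at the end of MBB for the given
/// condition and targets, returning the number of instructions added;
/// the two-condition idioms are synthesized as a pair of Jcc branches.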
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME this should probably have a DebugLoc operand
  DebugLoc dl = DebugLoc::getUnknownLoc();
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, dl, get(X86::JMP)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
  switch (CC) {
  case X86::COND_NP_OR_E:
    // Synthesize NP_OR_E with two branches.
    BuildMI(&MBB, dl, get(X86::JNP)).addMBB(TBB);
    ++Count;
    BuildMI(&MBB, dl, get(X86::JE)).addMBB(TBB);
    ++Count;
    break;
  case X86::COND_NE_OR_P:
    // Synthesize NE_OR_P with two branches.
    BuildMI(&MBB, dl, get(X86::JNE)).addMBB(TBB);
    ++Count;
    BuildMI(&MBB, dl, get(X86::JP)).addMBB(TBB);
    ++Count;
    break;
  default: {
    unsigned Opc = GetCondBranchFromCond(CC);
    BuildMI(&MBB, dl, get(Opc)).addMBB(TBB);
    ++Count;
  }
  }
  if (FBB) {
    // Two-way Conditional branch. Insert the second branch.
    BuildMI(&MBB, dl, get(X86::JMP)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

/// isHReg - Test if the given register is a physical h register.
static bool isHReg(unsigned Reg) {
  return X86::GR8_ABCD_HRegClass.contains(Reg);
}
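
// Background note: the "h" byte registers (AH, BH, CH, DH) cannot be encoded
// in an instruction that also carries a REX prefix, which is why the GR8
// copy logic below falls back to MOV8rr_NOREX on x86-64.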
bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();

  // Determine if DstRC and SrcRC have a common superclass.
  const TargetRegisterClass *CommonRC = DestRC;
  if (DestRC == SrcRC)
    /* Source and destination have the same register class. */;
  else if (CommonRC->hasSuperClass(SrcRC))
    CommonRC = SrcRC;
  else if (!DestRC->hasSubClass(SrcRC))
    CommonRC = 0;
  if (CommonRC) {
    unsigned Opc;
    if (CommonRC == &X86::GR64RegClass) {
      Opc = X86::MOV64rr;
    } else if (CommonRC == &X86::GR32RegClass) {
      Opc = X86::MOV32rr;
    } else if (CommonRC == &X86::GR16RegClass) {
      Opc = X86::MOV16rr;
    } else if (CommonRC == &X86::GR8RegClass) {
      // Copying to or from a physical H register on x86-64 requires a NOREX
      // move.  Otherwise use a normal move.
      if ((isHReg(DestReg) || isHReg(SrcReg)) &&
          TM.getSubtarget<X86Subtarget>().is64Bit())
        Opc = X86::MOV8rr_NOREX;
      else
        Opc = X86::MOV8rr;
    } else if (CommonRC == &X86::GR64_ABCDRegClass) {
      Opc = X86::MOV64rr;
    } else if (CommonRC == &X86::GR32_ABCDRegClass) {
      Opc = X86::MOV32rr;
    } else if (CommonRC == &X86::GR16_ABCDRegClass) {
      Opc = X86::MOV16rr;
    } else if (CommonRC == &X86::GR8_ABCD_LRegClass) {
      Opc = X86::MOV8rr;
    } else if (CommonRC == &X86::GR8_ABCD_HRegClass) {
      if (TM.getSubtarget<X86Subtarget>().is64Bit())
        Opc = X86::MOV8rr_NOREX;
      else
        Opc = X86::MOV8rr;
    } else if (CommonRC == &X86::GR64_NOREXRegClass) {
      Opc = X86::MOV64rr;
    } else if (CommonRC == &X86::GR32_NOREXRegClass) {
      Opc = X86::MOV32rr;
    } else if (CommonRC == &X86::GR16_NOREXRegClass) {
      Opc = X86::MOV16rr;
    } else if (CommonRC == &X86::GR8_NOREXRegClass) {
      Opc = X86::MOV8rr;
    } else if (CommonRC == &X86::RFP32RegClass) {
      Opc = X86::MOV_Fp3232;
    } else if (CommonRC == &X86::RFP64RegClass ||
               CommonRC == &X86::RSTRegClass) {
      Opc = X86::MOV_Fp6464;
    } else if (CommonRC == &X86::RFP80RegClass) {
      Opc = X86::MOV_Fp8080;
    } else if (CommonRC == &X86::FR32RegClass) {
      Opc = X86::FsMOVAPSrr;
    } else if (CommonRC == &X86::FR64RegClass) {
      Opc = X86::FsMOVAPDrr;
    } else if (CommonRC == &X86::VR128RegClass) {
      Opc = X86::MOVAPSrr;
    } else if (CommonRC == &X86::VR64RegClass) {
      Opc = X86::MMX_MOVQ64rr;
    } else {
      return false;
    }
    BuildMI(MBB, MI, DL, get(Opc), DestReg).addReg(SrcReg);
    return true;
  }
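
  // Background: in 64-bit mode an instruction carrying a REX prefix cannot
  // encode AH/BH/CH/DH (those encodings become SPL/BPL/SIL/DIL), which is why
  // the GR8 cases above switch to the REX-free MOV8rr_NOREX form.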

  // Moving EFLAGS to / from another register requires a push and a pop.
  if (SrcRC == &X86::CCRRegClass) {
    if (SrcReg != X86::EFLAGS)
      return false;
    if (DestRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, DL, get(X86::PUSHFQ));
      BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
      return true;
    } else if (DestRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, DL, get(X86::PUSHFD));
      BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
      return true;
    }
  } else if (DestRC == &X86::CCRRegClass) {
    if (DestReg != X86::EFLAGS)
      return false;
    if (SrcRC == &X86::GR64RegClass) {
      BuildMI(MBB, MI, DL, get(X86::PUSH64r)).addReg(SrcReg);
      BuildMI(MBB, MI, DL, get(X86::POPFQ));
      return true;
    } else if (SrcRC == &X86::GR32RegClass) {
      BuildMI(MBB, MI, DL, get(X86::PUSH32r)).addReg(SrcReg);
      BuildMI(MBB, MI, DL, get(X86::POPFD));
      return true;
    }
  }
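
  // For example, copying EFLAGS into the GR32 register EAX yields
  //   pushfd
  //   popl %eax
  // and the reverse direction yields pushl %eax / popfd.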

  // Moving from ST(0) turns into FpGET_ST0_32 etc.
  if (SrcRC == &X86::RSTRegClass) {
    // Copying from ST(0)/ST(1).
    if (SrcReg != X86::ST0 && SrcReg != X86::ST1)
      // Can only copy from ST(0)/ST(1) right now.
      return false;
    bool isST0 = SrcReg == X86::ST0;
    unsigned Opc;
    if (DestRC == &X86::RFP32RegClass)
      Opc = isST0 ? X86::FpGET_ST0_32 : X86::FpGET_ST1_32;
    else if (DestRC == &X86::RFP64RegClass)
      Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
    else {
      if (DestRC != &X86::RFP80RegClass)
        return false;
      Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
    }
    BuildMI(MBB, MI, DL, get(Opc), DestReg);
    return true;
  }

  // Moving to ST(0) turns into FpSET_ST0_32 etc.
  if (DestRC == &X86::RSTRegClass) {
    // Copying to ST(0)/ST(1).
    if (DestReg != X86::ST0 && DestReg != X86::ST1)
      // Can only copy to ST(0)/ST(1) right now.
      return false;
    bool isST0 = DestReg == X86::ST0;
    unsigned Opc;
    if (SrcRC == &X86::RFP32RegClass)
      Opc = isST0 ? X86::FpSET_ST0_32 : X86::FpSET_ST1_32;
    else if (SrcRC == &X86::RFP64RegClass)
      Opc = isST0 ? X86::FpSET_ST0_64 : X86::FpSET_ST1_64;
    else {
      if (SrcRC != &X86::RFP80RegClass)
        return false;
      Opc = isST0 ? X86::FpSET_ST0_80 : X86::FpSET_ST1_80;
    }
    BuildMI(MBB, MI, DL, get(Opc)).addReg(SrcReg);
    return true;
  }

  // Not yet supported!
  return false;
}
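
// Illustrative sketch only (not code from this file): a hypothetical client
// pass that wants a scratch copy of a GR32 value could call the hook as
//
//   if (!TII->copyRegToReg(MBB, MI, ScratchReg, X86::EAX,
//                          &X86::GR32RegClass, &X86::GR32RegClass))
//     ...handle the unsupported-copy case...
//
// where TII and ScratchReg are assumed names; with identical GR32 classes
// this takes the CommonRC fast path above and emits a single MOV32rr.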

static unsigned getStoreRegOpcode(unsigned SrcReg,
                                  const TargetRegisterClass *RC,
                                  bool isStackAligned,
                                  TargetMachine &TM) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    // Storing a physical H register on x86-64 requires a NOREX move.
    // Otherwise use a normal move.
    if (isHReg(SrcReg) &&
        TM.getSubtarget<X86Subtarget>().is64Bit())
      Opc = X86::MOV8mr_NOREX;
    else
      Opc = X86::MOV8mr;
  } else if (RC == &X86::GR64_ABCDRegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32_ABCDRegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16_ABCDRegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8_ABCD_LRegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR8_ABCD_HRegClass) {
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      Opc = X86::MOV8mr_NOREX;
    else
      Opc = X86::MOV8mr;
  } else if (RC == &X86::GR64_NOREXRegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32_NOREXRegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16_NOREXRegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8_NOREXRegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::ST_FpP80m;   // pops
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::ST_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::ST_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    // If stack is realigned we can use aligned stores.
    Opc = isStackAligned ? X86::MOVAPSmr : X86::MOVUPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    LLVM_UNREACHABLE("Unknown regclass");
  }

  return Opc;
}
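
// For VR128 the aligned/unaligned distinction matters: movaps faults on a
// 16-byte-misaligned address while movups does not, so the callers below pass
// isStackAligned based on the frame's guaranteed alignment.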

void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill,
                                       int FrameIdx,
                                       const TargetRegisterClass *RC) const {
  const MachineFunction &MF = *MBB.getParent();
  bool isAligned = (RI.getStackAlignment() >= 16) ||
    RI.needsStackRealignment(MF);
  unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();
  addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
    .addReg(SrcReg, getKillRegState(isKill));
}
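
// addFrameReference (from X86InstrBuilder.h) appends the full x86 memory
// reference -- frame index as base, scale, index, displacement, and segment
// operands -- so the result is, e.g., MOV32mr <fi#N>, 1, %noreg, 0, %noreg
// followed by the source register.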

void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  bool isAligned = (RI.getStackAlignment() >= 16) ||
    RI.needsStackRealignment(MF);
  unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
  DebugLoc DL = DebugLoc::getUnknownLoc();
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);
  MIB.addReg(SrcReg, getKillRegState(isKill));
  NewMIs.push_back(MIB);
}

static unsigned getLoadRegOpcode(unsigned DestReg,
                                 const TargetRegisterClass *RC,
                                 bool isStackAligned,
                                 const TargetMachine &TM) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    // Loading into a physical H register on x86-64 requires a NOREX move.
    // Otherwise use a normal move.
    if (isHReg(DestReg) &&
        TM.getSubtarget<X86Subtarget>().is64Bit())
      Opc = X86::MOV8rm_NOREX;
    else
      Opc = X86::MOV8rm;
  } else if (RC == &X86::GR64_ABCDRegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32_ABCDRegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16_ABCDRegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8_ABCD_LRegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR8_ABCD_HRegClass) {
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      Opc = X86::MOV8rm_NOREX;
    else
      Opc = X86::MOV8rm;
  } else if (RC == &X86::GR64_NOREXRegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32_NOREXRegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16_NOREXRegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8_NOREXRegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::LD_Fp80m;
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::LD_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::LD_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    // If stack is realigned we can use aligned loads.
    Opc = isStackAligned ? X86::MOVAPSrm : X86::MOVUPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    LLVM_UNREACHABLE("Unknown regclass");
  }

  return Opc;
}

void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        unsigned DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC) const {
  const MachineFunction &MF = *MBB.getParent();
  bool isAligned = (RI.getStackAlignment() >= 16) ||
    RI.needsStackRealignment(MF);
  unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();
  addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}

void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
  bool isAligned = (RI.getStackAlignment() >= 16) ||
    RI.needsStackRealignment(MF);
  unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
  DebugLoc DL = DebugLoc::getUnknownLoc();
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB.addOperand(Addr[i]);
  NewMIs.push_back(MIB);
}
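
// The *ToAddr/*FromAddr variants build the instruction from a caller-supplied
// address operand list instead of a frame index, and collect it in NewMIs
// rather than inserting it into a block; unfoldMemoryOperand below uses them
// to materialize the load and store halves of an unfolded instruction.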

bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  unsigned SlotSize = is64Bit ? 8 : 4;

  MachineFunction &MF = *MBB.getParent();
  unsigned FPReg = RI.getFrameRegister(MF);
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  unsigned CalleeFrameSize = 0;

  unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    const TargetRegisterClass *RegClass = CSI[i-1].getRegClass();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    if (RegClass != &X86::VR128RegClass) {
      CalleeFrameSize += SlotSize;
      BuildMI(MBB, MI, DL, get(Opc)).addReg(Reg, RegState::Kill);
    } else {
      storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(), RegClass);
    }
  }

  X86FI->setCalleeSavedFrameSize(CalleeFrameSize);
  return true;
}
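
// General-purpose registers are saved with push, walking CSI backwards so the
// forward pop loop in restoreCalleeSavedRegisters undoes them in the opposite
// order; XMM registers, which push cannot handle, go through ordinary
// stack-slot stores and loads instead.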

bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MI != MBB.end()) DL = MI->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  unsigned FPReg = RI.getFrameRegister(MF);
  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    const TargetRegisterClass *RegClass = CSI[i].getRegClass();
    if (RegClass != &X86::VR128RegClass) {
      BuildMI(MBB, MI, DL, get(Opc), Reg);
    } else {
      loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RegClass);
    }
  }
  return true;
}

static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
                                     const SmallVectorImpl<MachineOperand> &MOs,
                                     MachineInstr *MI,
                                     const TargetInstrInfo &TII) {
  // Create the base instruction with the memory operand as the first part.
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                              MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(NewMI);
  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    addOffset(MIB, 0);

  // Loop over the rest of the ri operands, converting them over.
  unsigned NumOps = MI->getDesc().getNumOperands()-2;
  for (unsigned i = 0; i != NumOps; ++i) {
    MachineOperand &MO = MI->getOperand(i+2);
    MIB.addOperand(MO);
  }
  for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    MIB.addOperand(MO);
  }
  return MIB;
}

static MachineInstr *FuseInst(MachineFunction &MF,
                              unsigned Opcode, unsigned OpNo,
                              const SmallVectorImpl<MachineOperand> &MOs,
                              MachineInstr *MI, const TargetInstrInfo &TII) {
  MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
                                              MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(NewMI);

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (i == OpNo) {
      assert(MO.isReg() && "Expected to fold into reg operand!");
      unsigned NumAddrOps = MOs.size();
      for (unsigned i = 0; i != NumAddrOps; ++i)
        MIB.addOperand(MOs[i]);
      if (NumAddrOps < 4)  // FrameIndex only
        addOffset(MIB, 0);
    } else {
      MIB.addOperand(MO);
    }
  }
  return MIB;
}

static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                const SmallVectorImpl<MachineOperand> &MOs,
                                MachineInstr *MI) {
  MachineFunction &MF = *MI->getParent()->getParent();
  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));

  unsigned NumAddrOps = MOs.size();
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4)  // FrameIndex only
    addOffset(MIB, 0);
  return MIB.addImm(0);
}
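
// MakeM0Inst rewrites a "materialize zero in a register" instruction into a
// "store zero to memory" instruction: the address operands come first and the
// trailing addImm(0) supplies the zero immediate, so for example MOV32r0
// folded into a stack slot becomes MOV32mi <slot>, 0.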

MachineInstr*
X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                    MachineInstr *MI, unsigned i,
                                    const SmallVectorImpl<MachineOperand> &MOs) const {
  const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
  bool isTwoAddrFold = false;
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  MachineInstr *NewMI = NULL;
  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it elsewhere. It requires
  // replacing *both* tied registers with the memory location.
  if (isTwoAddr && NumOps >= 2 && i < 2 &&
      MI->getOperand(0).isReg() &&
      MI->getOperand(1).isReg() &&
      MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
    isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
    if (MI->getOpcode() == X86::MOV16r0)
      NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV32r0)
      NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
    else if (MI->getOpcode() == X86::MOV64r0)
      NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
    else if (MI->getOpcode() == X86::MOV8r0)
      NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
    if (NewMI)
      return NewMI;

    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (i == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (i == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  // If a table was selected...
  if (OpcodeTablePtr) {
    // Find the Opcode to fuse.
    DenseMap<unsigned*, unsigned>::iterator I =
      OpcodeTablePtr->find((unsigned*)MI->getOpcode());
    if (I != OpcodeTablePtr->end()) {
      if (isTwoAddrFold)
        NewMI = FuseTwoAddrInst(MF, I->second, MOs, MI, *this);
      else
        NewMI = FuseInst(MF, I->second, i, MOs, MI, *this);
      return NewMI;
    }
  }

  // No fusion
  if (PrintFailedFusing)
    cerr << "We failed to fuse operand " << i << " in " << *MI;
  return NULL;
}
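
// The fold table is keyed on the index of the operand being folded:
// RegOp2MemOpTable0 for operand 0, Table1 and Table2 for operands 1 and 2,
// and Table2Addr when the tied def/use pair (operands 0 and 1, holding the
// same register) is itself replaced by the memory location.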

MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                                  const SmallVectorImpl<unsigned> &Ops,
                                                  int FrameIndex) const {
  // Check switch flag
  if (NoFusing) return NULL;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
  // FIXME: Move alignment requirement into tables?
  if (Alignment < 16) {
    switch (MI->getOpcode()) {
    default: break;
    // It is not always safe to fold movsd into these instructions: their
    // load-folding variants expect the address to be 16-byte aligned.
    case X86::FsANDNPDrr:
    case X86::FsANDNPSrr:
    case X86::FsANDPDrr:
    case X86::FsANDPSrr:
    case X86::FsORPDrr:
    case X86::FsORPSrr:
    case X86::FsXORPDrr:
    case X86::FsXORPSrr:
      return NULL;
    }
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
    }
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,4> MOs;
  MOs.push_back(MachineOperand::CreateFI(FrameIndex));
  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
}
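
// The TEST special case works because TEST r, r sets flags exactly like
// CMP r, 0: after retargeting the instruction to CMPri with a zero immediate,
// operand 0 is the only register left and can be folded, so e.g. TEST32rr on
// a spilled register becomes CMP32mi <slot>, 0.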

MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                                  const SmallVectorImpl<unsigned> &Ops,
                                                  MachineInstr *LoadMI) const {
  // Check switch flag
  if (NoFusing) return NULL;

  // Determine the alignment of the load.
  unsigned Alignment = 0;
  if (LoadMI->hasOneMemOperand())
    Alignment = LoadMI->memoperands_begin()->getAlignment();

  // FIXME: Move alignment requirement into tables?
  if (Alignment < 16) {
    switch (MI->getOpcode()) {
    default: break;
    // It is not always safe to fold movsd into these instructions: their
    // load-folding variants expect the address to be 16-byte aligned.
    case X86::FsANDNPDrr:
    case X86::FsANDNPSrr:
    case X86::FsANDPDrr:
    case X86::FsANDPSrr:
    case X86::FsORPDrr:
    case X86::FsORPSrr:
    case X86::FsXORPDrr:
    case X86::FsXORPSrr:
      return NULL;
    }
  }

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    unsigned NewOpc = 0;
    switch (MI->getOpcode()) {
    default: return NULL;
    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
    }
    // Change to CMPXXri r, 0 first.
    MI->setDesc(get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,X86AddrNumOperands> MOs;
  if (LoadMI->getOpcode() == X86::V_SET0 ||
      LoadMI->getOpcode() == X86::V_SETALLONES) {
    // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
    // Create a constant-pool entry and operands to load from it.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (TM.getRelocationModel() == Reloc::PIC_ &&
        !TM.getSubtarget<X86Subtarget>().is64Bit())
      // FIXME: PICBase = TM.getInstrInfo()->getGlobalBaseReg(&MF);
      // This doesn't work for several reasons.
      // 1. GlobalBaseReg may have been spilled.
      // 2. It may not be live at MI.
      return NULL;

    // Create a v4i32 constant-pool entry.
    MachineConstantPool &MCP = *MF.getConstantPool();
    const VectorType *Ty = VectorType::get(Type::Int32Ty, 4);
    Constant *C = LoadMI->getOpcode() == X86::V_SET0 ?
                    ConstantVector::getNullValue(Ty) :
                    ConstantVector::getAllOnesValue(Ty);
    unsigned CPI = MCP.getConstantPoolIndex(C, 16);

    // Create operands to load from the constant pool entry.
    MOs.push_back(MachineOperand::CreateReg(PICBase, false));
    MOs.push_back(MachineOperand::CreateImm(1));
    MOs.push_back(MachineOperand::CreateReg(0, false));
    MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
    MOs.push_back(MachineOperand::CreateReg(0, false));
  } else {
    // Folding a normal load. Just copy the load's address operands.
    unsigned NumOps = LoadMI->getDesc().getNumOperands();
    for (unsigned i = NumOps - X86AddrNumOperands; i != NumOps; ++i)
      MOs.push_back(LoadMI->getOperand(i));
  }
  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
}
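
// The five operands pushed for the constant-pool case mirror the x86 memory
// reference layout (base, scale, index, displacement, segment): the PIC base
// or %noreg as base, scale 1, no index register, the constant-pool index as
// the displacement, and no segment register.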

bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops) const {
  // Check switch flag
  if (NoFusing) return false;

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    switch (MI->getOpcode()) {
    default: return false;
    case X86::TEST8rr:
    case X86::TEST16rr:
    case X86::TEST32rr:
    case X86::TEST64rr:
      return true;
    }
  }

  if (Ops.size() != 1)
    return false;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  unsigned NumOps = MI->getDesc().getNumOperands();
  bool isTwoAddr = NumOps > 1 &&
    MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;

  // Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it elsewhere. It requires
  // replacing *both* tied registers with the memory location.
  const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
  if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2Addr;
  } else if (OpNum == 0) { // If operand 0
    switch (Opc) {
    case X86::MOV16r0:
    case X86::MOV32r0:
    case X86::MOV64r0:
    case X86::MOV8r0:
      return true;
    default: break;
    }
    OpcodeTablePtr = &RegOp2MemOpTable0;
  } else if (OpNum == 1) {
    OpcodeTablePtr = &RegOp2MemOpTable1;
  } else if (OpNum == 2) {
    OpcodeTablePtr = &RegOp2MemOpTable2;
  }

  if (OpcodeTablePtr) {
    // Find the Opcode to fuse.
    DenseMap<unsigned*, unsigned>::iterator I =
      OpcodeTablePtr->find((unsigned*)Opc);
    if (I != OpcodeTablePtr->end())
      return true;
  }
  return false;
}

bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)MI->getOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  DebugLoc dl = MI->getDebugLoc();
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return false;
  UnfoldLoad &= FoldedLoad;
  if (UnfoldStore && !FoldedStore)
    return false;
  UnfoldStore &= FoldedStore;

  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
    ? RI.getPointerRegClass() : RI.getRegClass(TOI.RegClass);
  SmallVector<MachineOperand, X86AddrNumOperands> AddrOps;
  SmallVector<MachineOperand,2> BeforeOps;
  SmallVector<MachineOperand,2> AfterOps;
  SmallVector<MachineOperand,4> ImpOps;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (i >= Index && i < Index + X86AddrNumOperands)
      AddrOps.push_back(Op);
    else if (Op.isReg() && Op.isImplicit())
      ImpOps.push_back(Op);
    else if (i < Index)
      BeforeOps.push_back(Op);
    else if (i > Index)
      AfterOps.push_back(Op);
  }

  // Emit the load instruction.
  if (UnfoldLoad) {
    loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
    if (UnfoldStore) {
      // Address operands cannot be marked isKill.
      for (unsigned i = 1; i != 1 + X86AddrNumOperands; ++i) {
        MachineOperand &MO = NewMIs[0]->getOperand(i);
        if (MO.isReg())
          MO.setIsKill(false);
      }
    }
  }

  // Emit the data processing instruction.
  MachineInstr *DataMI = MF.CreateMachineInstr(TID, MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(DataMI);

  if (FoldedStore)
    MIB.addReg(Reg, RegState::Define);
  for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
    MIB.addOperand(BeforeOps[i]);
  if (FoldedLoad)
    MIB.addReg(Reg);
  for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
    MIB.addOperand(AfterOps[i]);
  for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
    MachineOperand &MO = ImpOps[i];
    MIB.addReg(MO.getReg(),
               getDefRegState(MO.isDef()) |
               RegState::Implicit |
               getKillRegState(MO.isKill()) |
               getDeadRegState(MO.isDead()) |
               getUndefRegState(MO.isUndef()));
  }
  // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
  unsigned NewOpc = 0;
  switch (DataMI->getOpcode()) {
  default: break;
  case X86::CMP64ri32:
  case X86::CMP32ri:
  case X86::CMP16ri:
  case X86::CMP8ri: {
    MachineOperand &MO0 = DataMI->getOperand(0);
    MachineOperand &MO1 = DataMI->getOperand(1);
    if (MO1.getImm() == 0) {
      switch (DataMI->getOpcode()) {
      default: break;
      case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
      case X86::CMP32ri:   NewOpc = X86::TEST32rr; break;
      case X86::CMP16ri:   NewOpc = X86::TEST16rr; break;
      case X86::CMP8ri:    NewOpc = X86::TEST8rr; break;
      }
      DataMI->setDesc(get(NewOpc));
      MO1.ChangeToRegister(MO0.getReg(), false);
    }
  }
  }
  NewMIs.push_back(DataMI);

  // Emit the store instruction.
  if (UnfoldStore) {
    const TargetOperandInfo &DstTOI = TID.OpInfo[0];
    const TargetRegisterClass *DstRC = DstTOI.isLookupPtrRegClass()
      ? RI.getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
    storeRegToAddr(MF, Reg, true, AddrOps, DstRC, NewMIs);
  }

  return true;
}
|
|
|
|
|
|
|
|
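// The SelectionDAG variant that follows mirrors the MachineInstr unfolding
// above, except that an SDNode's operand list does not include its results,
// so operands are partitioned around Index-NumDefs rather than Index, and
// the trailing chain operand is carried through the new load and store.
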
bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                  SmallVectorImpl<SDNode*> &NewNodes) const {
  if (!N->isMachineOpcode())
    return false;

  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)N->getMachineOpcode());
  if (I == MemOp2RegOpTable.end())
    return false;
  unsigned Opc = I->second.first;
  unsigned Index = I->second.second & 0xf;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  const TargetInstrDesc &TID = get(Opc);
  const TargetOperandInfo &TOI = TID.OpInfo[Index];
  const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
    ? RI.getPointerRegClass() : RI.getRegClass(TOI.RegClass);
  unsigned NumDefs = TID.NumDefs;
  std::vector<SDValue> AddrOps;
  std::vector<SDValue> BeforeOps;
  std::vector<SDValue> AfterOps;
  DebugLoc dl = N->getDebugLoc();
  unsigned NumOps = N->getNumOperands();
  for (unsigned i = 0; i != NumOps-1; ++i) {
    SDValue Op = N->getOperand(i);
    if (i >= Index-NumDefs && i < Index-NumDefs + X86AddrNumOperands)
      AddrOps.push_back(Op);
    else if (i < Index-NumDefs)
      BeforeOps.push_back(Op);
    else if (i > Index-NumDefs)
      AfterOps.push_back(Op);
  }
  SDValue Chain = N->getOperand(NumOps-1);
  AddrOps.push_back(Chain);

  // Emit the load instruction.
  SDNode *Load = 0;
  const MachineFunction &MF = DAG.getMachineFunction();
  if (FoldedLoad) {
    MVT VT = *RC->vt_begin();
    bool isAligned = (RI.getStackAlignment() >= 16) ||
      RI.needsStackRealignment(MF);
    Load = DAG.getTargetNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
                             VT, MVT::Other, &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Load);
  }

  // Emit the data processing instruction.
  std::vector<MVT> VTs;
  const TargetRegisterClass *DstRC = 0;
  if (TID.getNumDefs() > 0) {
    const TargetOperandInfo &DstTOI = TID.OpInfo[0];
    DstRC = DstTOI.isLookupPtrRegClass()
      ? RI.getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
    VTs.push_back(*DstRC->vt_begin());
  }
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
      VTs.push_back(VT);
  }
  if (Load)
    BeforeOps.push_back(SDValue(Load, 0));
  std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
  SDNode *NewNode = DAG.getTargetNode(Opc, dl, VTs, &BeforeOps[0],
                                      BeforeOps.size());
  NewNodes.push_back(NewNode);

  // Emit the store instruction.
  if (FoldedStore) {
    AddrOps.pop_back();
    AddrOps.push_back(SDValue(NewNode, 0));
    AddrOps.push_back(Chain);
    bool isAligned = (RI.getStackAlignment() >= 16) ||
      RI.needsStackRealignment(MF);
    SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(0, DstRC,
                                                        isAligned, TM),
                                      dl, MVT::Other,
                                      &AddrOps[0], AddrOps.size());
    NewNodes.push_back(Store);
  }

  return true;
}

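// For illustration: given a folded-load opcode such as X86::ADD32rm, the
// lookup below yields the register form X86::ADD32rr when UnfoldLoad is
// requested (assuming the usual ADD32rr/ADD32rm entry is present in the
// fold tables this file builds).
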
unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                      bool UnfoldLoad, bool UnfoldStore) const {
  DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
    MemOp2RegOpTable.find((unsigned*)Opc);
  if (I == MemOp2RegOpTable.end())
    return 0;
  bool FoldedLoad = I->second.second & (1 << 4);
  bool FoldedStore = I->second.second & (1 << 5);
  if (UnfoldLoad && !FoldedLoad)
    return 0;
  if (UnfoldStore && !FoldedStore)
    return 0;
  return I->second.first;
}

bool X86InstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::TCRETURNri:
  case X86::TCRETURNdi:
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP64r:  // Indirect branch (64-bit).
  case X86::JMP32m:  // Indirect branch through mem.
  case X86::JMP64m:  // Indirect branch through mem (64-bit).
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
  // COND_NE_OR_P and COND_NP_OR_E arise from lowering FCMP_UNE and FCMP_OEQ
  // as a pair of branches; they have no single-condition opposite, so they
  // cannot be reversed here.
  if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
    return true;
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

bool X86InstrInfo::
isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // FIXME: Return false for x87 stack register classes for now. We can't
  // allow any loads of these registers before FpGet_ST0_80.
  return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass ||
           RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass);
}

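// sizeOfImm reads the immediate width straight out of the descriptor's
// TSFlags; e.g. an Imm32-class instruction contributes four bytes no matter
// what value it actually encodes.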
unsigned X86InstrInfo::sizeOfImm(const TargetInstrDesc *Desc) {
  switch (Desc->TSFlags & X86II::ImmMask) {
  case X86II::Imm8:  return 1;
  case X86II::Imm16: return 2;
  case X86II::Imm32: return 4;
  case X86II::Imm64: return 8;
  default: assert(0 && "Immediate size not set!");
    return 0;
  }
}

/// isX86_64ExtendedReg - Is the MachineOperand an x86-64 extended register?
/// e.g. r8, xmm8, etc.
bool X86InstrInfo::isX86_64ExtendedReg(const MachineOperand &MO) {
  if (!MO.isReg()) return false;
  switch (MO.getReg()) {
  default: break;
  case X86::R8:    case X86::R9:    case X86::R10:   case X86::R11:
  case X86::R12:   case X86::R13:   case X86::R14:   case X86::R15:
  case X86::R8D:   case X86::R9D:   case X86::R10D:  case X86::R11D:
  case X86::R12D:  case X86::R13D:  case X86::R14D:  case X86::R15D:
  case X86::R8W:   case X86::R9W:   case X86::R10W:  case X86::R11W:
  case X86::R12W:  case X86::R13W:  case X86::R14W:  case X86::R15W:
  case X86::R8B:   case X86::R9B:   case X86::R10B:  case X86::R11B:
  case X86::R12B:  case X86::R13B:  case X86::R14B:  case X86::R15B:
  case X86::XMM8:  case X86::XMM9:  case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
    return true;
  }
  return false;
}

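// determineREX below assembles the low four REX bits: bit 0 (REX.B), bit 1
// (REX.X), bit 2 (REX.R), and bit 3 (REX.W, set from the REX_W flag). A
// bare 0x40 is returned when SPL/BPL/SIL/DIL force an otherwise-empty REX
// prefix. Note that this helper only computes the bits; the actual prefix
// byte is emitted elsewhere, in the code emitter.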
/// determineREX - Determine if the MachineInstr has to be encoded with an
/// x86-64 REX prefix, which specifies 1) 64-bit instructions, 2) non-default
/// operand size, and 3) use of x86-64 extended registers.
unsigned X86InstrInfo::determineREX(const MachineInstr &MI) {
  unsigned REX = 0;
  const TargetInstrDesc &Desc = MI.getDesc();

  // Pseudo instructions do not need a REX prefix byte.
  if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
    return 0;
  if (Desc.TSFlags & X86II::REX_W)
    REX |= 1 << 3;

  unsigned NumOps = Desc.getNumOperands();
  if (NumOps) {
    bool isTwoAddr = NumOps > 1 &&
      Desc.getOperandConstraint(1, TOI::TIED_TO) != -1;

    // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
    unsigned i = isTwoAddr ? 1 : 0;
    for (unsigned e = NumOps; i != e; ++i) {
      const MachineOperand& MO = MI.getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (isX86_64NonExtLowByteReg(Reg))
          REX |= 0x40;
      }
    }

    switch (Desc.TSFlags & X86II::FormMask) {
    case X86II::MRMInitReg:
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= (1 << 0) | (1 << 2);
      break;
    case X86II::MRMSrcReg: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 2;
      i = isTwoAddr ? 2 : 1;
      for (unsigned e = NumOps; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (isX86_64ExtendedReg(MO))
          REX |= 1 << 0;
      }
      break;
    }
    case X86II::MRMSrcMem: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 2;
      unsigned Bit = 0;
      i = isTwoAddr ? 2 : 1;
      for (; i != NumOps; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (MO.isReg()) {
          if (isX86_64ExtendedReg(MO))
            REX |= 1 << Bit;
          Bit++;
        }
      }
      break;
    }
    case X86II::MRM0m: case X86II::MRM1m:
    case X86II::MRM2m: case X86II::MRM3m:
    case X86II::MRM4m: case X86II::MRM5m:
    case X86II::MRM6m: case X86II::MRM7m:
    case X86II::MRMDestMem: {
      unsigned e = (isTwoAddr ? X86AddrNumOperands+1 : X86AddrNumOperands);
      i = isTwoAddr ? 1 : 0;
      if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
        REX |= 1 << 2;
      unsigned Bit = 0;
      for (; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (MO.isReg()) {
          if (isX86_64ExtendedReg(MO))
            REX |= 1 << Bit;
          Bit++;
        }
      }
      break;
    }
    default: {
      if (isX86_64ExtendedReg(MI.getOperand(0)))
        REX |= 1 << 0;
      i = isTwoAddr ? 2 : 1;
      for (unsigned e = NumOps; i != e; ++i) {
        const MachineOperand& MO = MI.getOperand(i);
        if (isX86_64ExtendedReg(MO))
          REX |= 1 << 2;
      }
      break;
    }
    }
  }
  return REX;
}

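// The helpers below return fixed, conservative byte counts for individual
// encoding components; GetInstSizeWithDesc sums them without emitting
// anything. The 'dword' flag selects the 8-byte form used by 64-bit
// immediates such as MOV64ri.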
/// sizePCRelativeBlockAddress - This method returns the size of a PC-relative
/// block address instruction.
///
static unsigned sizePCRelativeBlockAddress() {
  return 4;
}

/// sizeGlobalAddress - Return the size of the emission of this global address.
///
static unsigned sizeGlobalAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeConstPoolAddress - Return the size of the emission of this constant
/// pool address.
///
static unsigned sizeConstPoolAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeExternalSymbolAddress - Return the size of the emission of this
/// external symbol.
///
static unsigned sizeExternalSymbolAddress(bool dword) {
  return dword ? 8 : 4;
}

/// sizeJumpTableAddress - Return the size of the emission of this jump
/// table address.
///
static unsigned sizeJumpTableAddress(bool dword) {
  return dword ? 8 : 4;
}

static unsigned sizeConstant(unsigned Size) {
  return Size;
}

static unsigned sizeRegModRMByte() {
  return 1;
}

static unsigned sizeSIBByte() {
  return 1;
}

static unsigned getDisplacementFieldSize(const MachineOperand *RelocOp) {
  unsigned FinalSize = 0;
  // If this is a simple integer displacement that doesn't require a
  // relocation, it is encoded as a 4-byte immediate.
  if (!RelocOp) {
    FinalSize += sizeConstant(4);
    return FinalSize;
  }

  // Otherwise, this is something that requires a relocation.
  if (RelocOp->isGlobal()) {
    FinalSize += sizeGlobalAddress(false);
  } else if (RelocOp->isCPI()) {
    FinalSize += sizeConstPoolAddress(false);
  } else if (RelocOp->isJTI()) {
    FinalSize += sizeJumpTableAddress(false);
  } else {
    assert(0 && "Unknown value to relocate!");
  }
  return FinalSize;
}

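// getMemModRMByteSize conservatively sizes the ModR/M byte, optional SIB
// byte, and displacement for one memory operand. For example, a plain [reg]
// counts 1 byte (ModR/M only), while [reg+reg*scale+disp] counts
// 1 + 1 + 4 bytes, since unresolved displacements are pessimistically
// assumed to need disp32 rather than disp8.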
static unsigned getMemModRMByteSize(const MachineInstr &MI, unsigned Op,
                                    bool IsPIC, bool Is64BitMode) {
  const MachineOperand &Op3 = MI.getOperand(Op+3);
  int DispVal = 0;
  const MachineOperand *DispForReloc = 0;
  unsigned FinalSize = 0;

  // Figure out what sort of displacement we have to handle here.
  if (Op3.isGlobal()) {
    DispForReloc = &Op3;
  } else if (Op3.isCPI()) {
    if (Is64BitMode || IsPIC) {
      DispForReloc = &Op3;
    } else {
      DispVal = 1;
    }
  } else if (Op3.isJTI()) {
    if (Is64BitMode || IsPIC) {
      DispForReloc = &Op3;
    } else {
      DispVal = 1;
    }
  } else {
    DispVal = 1;
  }

  const MachineOperand &Base = MI.getOperand(Op);
  const MachineOperand &IndexReg = MI.getOperand(Op+2);

  unsigned BaseReg = Base.getReg();

  // Is a SIB byte needed?
  if ((!Is64BitMode || DispForReloc || BaseReg != 0) &&
      IndexReg.getReg() == 0 &&
      (BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) {
    if (BaseReg == 0) {  // Just a displacement?
      // Emit special case [disp32] encoding.
      ++FinalSize;
      FinalSize += getDisplacementFieldSize(DispForReloc);
    } else {
      unsigned BaseRegNo = X86RegisterInfo::getX86RegNum(BaseReg);
      if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) {
        // Emit simple indirect register encoding, e.g. [EAX].
        ++FinalSize;
        // Be pessimistic and assume it's a disp32, not a disp8.
      } else {
        // Emit the most general non-SIB encoding: [REG+disp32].
        ++FinalSize;
        FinalSize += getDisplacementFieldSize(DispForReloc);
      }
    }

  } else {  // We need a SIB byte, so start by outputting the ModR/M byte first.
    assert(IndexReg.getReg() != X86::ESP &&
           IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

    bool ForceDisp32 = false;
    if (BaseReg == 0 || DispForReloc) {
      // Emit the normal disp32 encoding.
      ++FinalSize;
      ForceDisp32 = true;
    } else {
      ++FinalSize;
    }

    FinalSize += sizeSIBByte();

    // Do we need to output a displacement?
    if (DispVal != 0 || ForceDisp32) {
      FinalSize += getDisplacementFieldSize(DispForReloc);
    }
  }
  return FinalSize;
}

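// GetInstSizeWithDesc accumulates a worst-case encoded length in bytes:
// prefixes (lock, segment override, rep, operand/address size, REX, 0x0F
// escapes), then the opcode, and finally the form-specific ModR/M, SIB,
// displacement, and immediate fields selected by the FormMask switch below.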
static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
                                    const TargetInstrDesc *Desc,
                                    bool IsPIC, bool Is64BitMode) {

  unsigned Opcode = Desc->Opcode;
  unsigned FinalSize = 0;

  // Emit the lock opcode prefix as needed.
  if (Desc->TSFlags & X86II::LOCK) ++FinalSize;

  // Emit segment override opcode prefix as needed.
  switch (Desc->TSFlags & X86II::SegOvrMask) {
  case X86II::FS:
  case X86II::GS:
    ++FinalSize;
    break;
  default: assert(0 && "Invalid segment!");
  case 0: break;  // No segment override!
  }

  // Emit the repeat opcode prefix as needed.
  if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) ++FinalSize;

  // Emit the operand size opcode prefix as needed.
  if (Desc->TSFlags & X86II::OpSize) ++FinalSize;

  // Emit the address size opcode prefix as needed.
  if (Desc->TSFlags & X86II::AdSize) ++FinalSize;

  bool Need0FPrefix = false;
  switch (Desc->TSFlags & X86II::Op0Mask) {
  case X86II::TB:  // Two-byte opcode prefix
  case X86II::T8:  // 0F 38
  case X86II::TA:  // 0F 3A
    Need0FPrefix = true;
    break;
  case X86II::REP: break;  // already handled.
  case X86II::XS:  // F3 0F
    ++FinalSize;
    Need0FPrefix = true;
    break;
  case X86II::XD:  // F2 0F
    ++FinalSize;
    Need0FPrefix = true;
    break;
  case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB:
  case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF:
    ++FinalSize;
    break;  // Two-byte opcode prefix
  default: assert(0 && "Invalid prefix!");
  case 0: break;  // No prefix!
  }

  if (Is64BitMode) {
    // REX prefix
    unsigned REX = X86InstrInfo::determineREX(MI);
    if (REX)
      ++FinalSize;
  }

  // 0x0F escape code must be emitted just before the opcode.
  if (Need0FPrefix)
    ++FinalSize;

  switch (Desc->TSFlags & X86II::Op0Mask) {
  case X86II::T8:  // 0F 38
    ++FinalSize;
    break;
  case X86II::TA:  // 0F 3A
    ++FinalSize;
    break;
  }

  // If this is a two-address instruction, skip one of the register operands.
  unsigned NumOps = Desc->getNumOperands();
  unsigned CurOp = 0;
  if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)
    CurOp++;
  else if (NumOps > 2 && Desc->getOperandConstraint(NumOps-1, TOI::TIED_TO) == 0)
    // Skip the last source operand that is tied_to the dest reg, e.g. LXADD32.
    --NumOps;

  switch (Desc->TSFlags & X86II::FormMask) {
  default: assert(0 && "Unknown FormMask value in X86 MachineCodeEmitter!");
  case X86II::Pseudo:
    // Remember the current PC offset; this is the PIC relocation
    // base address.
    switch (Opcode) {
    default:
      break;
    case TargetInstrInfo::INLINEASM: {
      const MachineFunction *MF = MI.getParent()->getParent();
      const char *AsmStr = MI.getOperand(0).getSymbolName();
      const TargetAsmInfo* AI = MF->getTarget().getTargetAsmInfo();
      FinalSize += AI->getInlineAsmLength(AsmStr);
      break;
    }
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      break;
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case X86::DWARF_LOC:
    case X86::FP_REG_KILL:
      break;
    case X86::MOVPC32r: {
      // This emits the "call" portion of this pseudo instruction.
      ++FinalSize;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      break;
    }
    }
    CurOp = NumOps;
    break;
  case X86II::RawFrm:
    ++FinalSize;

    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      if (MO.isMBB()) {
        FinalSize += sizePCRelativeBlockAddress();
      } else if (MO.isGlobal()) {
        FinalSize += sizeGlobalAddress(false);
      } else if (MO.isSymbol()) {
        FinalSize += sizeExternalSymbolAddress(false);
      } else if (MO.isImm()) {
        FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      } else {
        assert(0 && "Unknown RawFrm operand!");
      }
    }
    break;

  case X86II::AddRegFrm:
    ++FinalSize;
    ++CurOp;

    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO1.isImm())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64ri)
          dword = true;
        if (MO1.isGlobal()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO1.isSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO1.isCPI())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO1.isJTI())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;

  case X86II::MRMDestReg: {
    ++FinalSize;
    FinalSize += sizeRegModRMByte();
    CurOp += 2;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }
  case X86II::MRMDestMem: {
    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
    CurOp += X86AddrNumOperands + 1;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }

  case X86II::MRMSrcReg:
    ++FinalSize;
    FinalSize += sizeRegModRMByte();
    CurOp += 2;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;

  case X86II::MRMSrcMem: {
    int AddrOperands;
    if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
        Opcode == X86::LEA16r || Opcode == X86::LEA32r)
      AddrOperands = X86AddrNumOperands - 1;  // No segment register
    else
      AddrOperands = X86AddrNumOperands;

    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
    CurOp += AddrOperands + 1;
    if (CurOp != NumOps) {
      ++CurOp;
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
    }
    break;
  }

  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    ++FinalSize;
    if (Desc->getOpcode() == X86::LFENCE ||
        Desc->getOpcode() == X86::MFENCE) {
      // Special handling of lfence and mfence.
      FinalSize += sizeRegModRMByte();
    } else if (Desc->getOpcode() == X86::MONITOR ||
               Desc->getOpcode() == X86::MWAIT) {
      // Special handling of monitor and mwait.
      FinalSize += sizeRegModRMByte() + 1;  // +1 for the opcode.
    } else {
      ++CurOp;
      FinalSize += sizeRegModRMByte();
    }

    if (CurOp != NumOps) {
      const MachineOperand &MO1 = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO1.isImm())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64ri32)
          dword = true;
        if (MO1.isGlobal()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO1.isSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO1.isCPI())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO1.isJTI())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;

  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {

    ++FinalSize;
    FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
    CurOp += X86AddrNumOperands;

    if (CurOp != NumOps) {
      const MachineOperand &MO = MI.getOperand(CurOp++);
      unsigned Size = X86InstrInfo::sizeOfImm(Desc);
      if (MO.isImm())
        FinalSize += sizeConstant(Size);
      else {
        bool dword = false;
        if (Opcode == X86::MOV64mi32)
          dword = true;
        if (MO.isGlobal()) {
          FinalSize += sizeGlobalAddress(dword);
        } else if (MO.isSymbol())
          FinalSize += sizeExternalSymbolAddress(dword);
        else if (MO.isCPI())
          FinalSize += sizeConstPoolAddress(dword);
        else if (MO.isJTI())
          FinalSize += sizeJumpTableAddress(dword);
      }
    }
    break;
  }

  case X86II::MRMInitReg:
    ++FinalSize;
    // Duplicate register, used by things like MOV8r0 (aka xor reg,reg).
    FinalSize += sizeRegModRMByte();
    ++CurOp;
    break;
  }

  if (!Desc->isVariadic() && CurOp != NumOps) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Cannot determine size: " << MI;
    llvm_report_error(Msg.str());
  }

  return FinalSize;
}

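// Note that MOVPC32r is counted as a call/pop pair: GetInstSizeInBytes adds
// the size of a POP32r on top of the "call" portion sized above.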
unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const TargetInstrDesc &Desc = MI->getDesc();
  bool IsPIC = (TM.getRelocationModel() == Reloc::PIC_);
  bool Is64BitMode = TM.getSubtargetImpl()->is64Bit();
  unsigned Size = GetInstSizeWithDesc(*MI, &Desc, IsPIC, Is64BitMode);
  if (Desc.getOpcode() == X86::MOVPC32r)
    Size += GetInstSizeWithDesc(*MI, &get(X86::POP32r), IsPIC, Is64BitMode);
  return Size;
}

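// For reference, the PIC base sequence materialized below expands to roughly
// the following (register name illustrative):
//   call  .Lpiclabel            ; MOVPC32r: push the pc...
// .Lpiclabel:
//   popl  %reg                  ; ...and pop it into the base register
// and, for GOT-style PIC only:
//   addl  $_GLOBAL_OFFSET_TABLE_ + [.-.Lpiclabel], %reg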
/// getGlobalBaseReg - Return a virtual register initialized with the
/// global base register value. Output instructions required to
/// initialize the register in the function entry block, if necessary.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
         "X86-64 PIC uses RIP relative addressing");

  X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
  unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // Insert the set of GlobalBaseReg into the first MBB of the function.
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (MBBI != FirstMBB.end()) DL = MBBI->getDebugLoc();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);

  const TargetInstrInfo *TII = TM.getInstrInfo();
  // The operand of MovePCtoStack is completely ignored by the asm printer;
  // it's only used in JIT code emission as a displacement to the pc.
  BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);

  // If we're using vanilla 'GOT' PIC style, we should use relative addressing
  // not to the pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
  if (TM.getSubtarget<X86Subtarget>().isPICStyleGOT()) {
    GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
    // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
    BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
      .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_", 0,
                                    X86II::MO_GOT_ABSOLUTE_ADDRESS);
  } else {
    GlobalBaseReg = PC;
  }

  X86FI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}