Instruction select globals with offsets better. For example, on this test
case:

int C[100];
int foo() {
  return C[4];
}

We now codegen:

foo:
        mov %EAX, DWORD PTR [C + 16]
        ret

instead of:

foo:
        mov %EAX, OFFSET C
        mov %EAX, DWORD PTR [%EAX + 16]
        ret

Other impressive features may be coming later.

This patch was contributed by Jeff Cohen!


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@17011 91177308-0d34-0410-b5e6-96231b3b80d8
commit 358a9027a8
parent 8cce7cd0ae
Author: Chris Lattner
Date:   2004-10-15 05:05:29 +00:00

@@ -826,6 +826,9 @@ void X86ISel::getAddressingMode(Value *Addr, X86AddressMode &AM) {
     AM.BaseType = X86AddressMode::FrameIndexBase;
     AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
     return;
+  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
+    AM.GV = GV;
+    return;
   }
 
   // If it's not foldable, reset addr mode.
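
With a GlobalValue now foldable into the address mode, the test case above can be
loaded in a single instruction. A rough caller-side sketch (not part of this patch)
of what getAddressingMode() leaves in AM for a pointer like &C[4] and how a consumer
turns it into one folded load; X86::MOV32rm is assumed to exist alongside the
MOV32mr/MOV32mi opcodes used elsewhere in the diff, and GV/DestReg are hypothetical
stand-ins for the global 'C' and the result register:

    X86AddressMode AM;
    AM.GV   = GV;      // the base of the address is the global symbol itself
    AM.Disp = 4 * 4;   // constant offset: element 4 of an int array
    // Emits the folded load:  mov %EAX, DWORD PTR [C + 16]
    addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg), AM);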
@@ -3108,7 +3111,7 @@ void X86ISel::visitStoreInst(StoreInst &I) {
       addFullAddress(BuildMI(BB, Opcode, 5), AM).addImm(Val);
     }
   } else if (isa<ConstantPointerNull>(I.getOperand(0))) {
     addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(0);
   } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
     addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CB->getValue());
   } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) {
@@ -3139,6 +3142,11 @@ void X86ISel::visitStoreInst(StoreInst &I) {
     AM.Disp += 4;
     addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg+1);
   } else {
+    // FIXME: stop emitting these two instructions:
+    //   movl $global,%eax
+    //   movl %eax,(%ebx)
+    // when one instruction will suffice.  That includes when the global
+    // has an offset applied to it.
     unsigned ValReg = getReg(I.getOperand(0));
     static const unsigned Opcodes[] = {
       X86::MOV8mr, X86::MOV16mr, X86::MOV32mr, X86::FST32m
@@ -3663,12 +3671,11 @@ void X86ISel::getGEPIndex(MachineBasicBlock *MBB,
     return;
   }
 
-#if 0   // FIXME: TODO!
-  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
-    // FIXME: When addressing modes are more powerful/correct, we could load
-    // global addresses directly as 32-bit immediates.
-  }
-#endif
+  if (GlobalValue *GV = dyn_cast<GlobalValue>(GEPOps.back())) {
+    AM.GV = GV;
+    GEPOps.pop_back();
+    return;
+  }
 
   AM.Base.Reg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
   GEPOps.pop_back();        // Consume the last GEP operand
@@ -3744,8 +3751,11 @@ void X86ISel::emitGEPOperation(MachineBasicBlock *MBB,
   }
 
   if (AM.BaseType == X86AddressMode::RegBase &&
-      AM.IndexReg == 0 && AM.Disp == 0)
+      AM.IndexReg == 0 && AM.Disp == 0 && !AM.GV)
     BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(AM.Base.Reg);
+  else if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0 &&
+           AM.IndexReg == 0 && AM.Disp == 0)
+    BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addGlobalAddress(AM.GV);
   else
     addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg), AM);
   --IP;
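
The three-way choice in the last hunk is easier to see pulled out as a decision
function. The helper below is hypothetical and only a restatement for illustration
(it is not in the patch); it mirrors the conditions in the diff exactly:

    // Hypothetical summary of the opcode choice made by the branch above.
    static unsigned pickAddressCopyOpcode(const X86AddressMode &AM) {
      if (AM.BaseType == X86AddressMode::RegBase &&
          AM.IndexReg == 0 && AM.Disp == 0 && !AM.GV)
        return X86::MOV32rr;   // plain register copy of the base
      if (AM.BaseType == X86AddressMode::RegBase && AM.Base.Reg == 0 &&
          AM.IndexReg == 0 && AM.Disp == 0)
        return X86::MOV32ri;   // bare global: materialize its address as an imm
      return X86::LEA32r;      // anything else: full address computation
    }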