Fix an x86-64 codegen deficiency: allow GV + offset when using the RIP addressing mode.

Before:
_main:
        subq    $8, %rsp
        leaq    _X(%rip), %rax
        movsd   8(%rax), %xmm1
        movss   _X(%rip), %xmm0
        call    _t
        xorl    %ecx, %ecx
        movl    %ecx, %eax
        addq    $8, %rsp
        ret
Now:
_main:
        subq    $8, %rsp
        movsd   _X+8(%rip), %xmm1
        movss   _X(%rip), %xmm0
        call    _t
        xorl    %ecx, %ecx
        movl    %ecx, %eax
        addq    $8, %rsp
        ret
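
For reference, a rough C equivalent of the new x86_64 test case added at the end of this diff (only a sketch; the field names are illustrative):

struct x { float f; double d; };
struct x X = { 1.0f, 2.0 };

void t(float, double);

int main(void) {
  /* X.d lives at _X+8; that offset can now be folded into the
     RIP-relative operand (movsd _X+8(%rip), %xmm1) instead of
     materializing the base with a separate leaq. */
  t(X.f, X.d);
  return 0;
}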

Notice there is another idiotic codegen issue that needs to be fixed ASAP: to return zero we materialize it in %ecx and then copy it,
xorl    %ecx, %ecx
movl    %ecx, %eax
when a single xorl %eax, %eax would do.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@46850 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng 2008-02-07 08:53:49 +00:00
parent 15246738f2
commit be3bf42331
7 changed files with 76 additions and 82 deletions


@@ -63,7 +63,7 @@ namespace {
       int FrameIndex;
     } Base;

-    bool isRIPRel;     // RIP relative?
+    bool isRIPRel;     // RIP as base?
     unsigned Scale;
     SDOperand IndexReg;
     unsigned Disp;
@@ -664,7 +664,9 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
   case X86ISD::Wrapper: {
     bool is64Bit = Subtarget->is64Bit();
     // Under X86-64 non-small code model, GV (and friends) are 64-bits.
-    if (is64Bit && TM.getCodeModel() != CodeModel::Small)
+    // Also, base and index reg must be 0 in order to use rip as base.
+    if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
+                    AM.Base.Reg.Val || AM.IndexReg.Val))
       break;
     if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
       break;
@@ -672,39 +674,27 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
     // been picked, we can't fit the result available in the register in the
     // addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
     if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
-      bool isStatic = TM.getRelocationModel() == Reloc::Static;
       SDOperand N0 = N.getOperand(0);
-      // Mac OS X X86-64 lower 4G address is not available.
-      bool isAbs32 = !is64Bit ||
-        (isStatic && Subtarget->hasLow4GUserSpaceAddress());
       if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
         GlobalValue *GV = G->getGlobal();
-        if (isAbs32 || isRoot) {
-          AM.GV = GV;
-          AM.Disp += G->getOffset();
-          AM.isRIPRel = !isAbs32;
-          return false;
-        }
+        AM.GV = GV;
+        AM.Disp += G->getOffset();
+        AM.isRIPRel = is64Bit;
+        return false;
       } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
-        if (isAbs32 || isRoot) {
-          AM.CP = CP->getConstVal();
-          AM.Align = CP->getAlignment();
-          AM.Disp += CP->getOffset();
-          AM.isRIPRel = !isAbs32;
-          return false;
-        }
+        AM.CP = CP->getConstVal();
+        AM.Align = CP->getAlignment();
+        AM.Disp += CP->getOffset();
+        AM.isRIPRel = is64Bit;
+        return false;
       } else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
-        if (isAbs32 || isRoot) {
-          AM.ES = S->getSymbol();
-          AM.isRIPRel = !isAbs32;
-          return false;
-        }
+        AM.ES = S->getSymbol();
+        AM.isRIPRel = is64Bit;
+        return false;
       } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
-        if (isAbs32 || isRoot) {
-          AM.JT = J->getIndex();
-          AM.isRIPRel = !isAbs32;
-          return false;
-        }
+        AM.JT = J->getIndex();
+        AM.isRIPRel = is64Bit;
+        return false;
       }
     }
     break;
@@ -719,7 +709,7 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
     break;

   case ISD::SHL:
-    if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1)
+    if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1 || AM.isRIPRel)
       break;

     if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
@@ -759,7 +749,8 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
     if (!AlreadySelected &&
         AM.BaseType == X86ISelAddressMode::RegBase &&
         AM.Base.Reg.Val == 0 &&
-        AM.IndexReg.Val == 0) {
+        AM.IndexReg.Val == 0 &&
+        !AM.isRIPRel) {
       if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
         if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
           AM.Scale = unsigned(CN->getValue())-1;
@@ -834,6 +825,9 @@ bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
     // Scale must not be used already.
     if (AM.IndexReg.Val != 0 || AM.Scale != 1) break;

+    // Not when RIP is used as the base.
+    if (AM.isRIPRel) break;
+
     ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
     ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
@@ -874,7 +868,7 @@ bool X86DAGToDAGISel::MatchAddressBase(SDOperand N, X86ISelAddressMode &AM,
   // Is the base register already occupied?
   if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
     // If so, check to see if the scale index register is set.
-    if (AM.IndexReg.Val == 0) {
+    if (AM.IndexReg.Val == 0 && !AM.isRIPRel) {
       AM.IndexReg = N;
       AM.Scale = 1;
       return false;


@@ -1125,16 +1125,16 @@ def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
 def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
           (MOV64mi32 addr:$dst, tconstpool:$src)>,
-          Requires<[SmallCode, HasLow4G, IsStatic]>;
+          Requires<[SmallCode, IsStatic]>;
 def : Pat<(store (i64 (X86Wrapper tjumptable:$src)), addr:$dst),
           (MOV64mi32 addr:$dst, tjumptable:$src)>,
-          Requires<[SmallCode, HasLow4G, IsStatic]>;
+          Requires<[SmallCode, IsStatic]>;
 def : Pat<(store (i64 (X86Wrapper tglobaladdr:$src)), addr:$dst),
           (MOV64mi32 addr:$dst, tglobaladdr:$src)>,
-          Requires<[SmallCode, HasLow4G, IsStatic]>;
+          Requires<[SmallCode, IsStatic]>;
 def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
           (MOV64mi32 addr:$dst, texternalsym:$src)>,
-          Requires<[SmallCode, HasLow4G, IsStatic]>;
+          Requires<[SmallCode, IsStatic]>;

 // Calls
 // Direct PC relative function call for small code model. 32-bit displacement


@@ -172,7 +172,6 @@ def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">;
 def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
 def In32BitMode : Predicate<"!Subtarget->is64Bit()">;
 def In64BitMode : Predicate<"Subtarget->is64Bit()">;
-def HasLow4G : Predicate<"Subtarget->hasLow4GUserSpaceAddress()">;
 def SmallCode : Predicate<"TM.getCodeModel() == CodeModel::Small">;
 def NotSmallCode : Predicate<"TM.getCodeModel() != CodeModel::Small">;
 def IsStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;


@@ -228,7 +228,6 @@ X86Subtarget::X86Subtarget(const Module &M, const std::string &FS, bool is64Bit)
   // FIXME: this is a known good value for Yonah. How about others?
   , MaxInlineSizeThreshold(128)
   , Is64Bit(is64Bit)
-  , HasLow4GUserAddress(true)
   , TargetType(isELF) { // Default to ELF unless otherwise specified.

   // Determine default and user specified characteristics
@@ -300,9 +299,6 @@ X86Subtarget::X86Subtarget(const Module &M, const std::string &FS, bool is64Bit)
       ? X86Subtarget::Intel : X86Subtarget::ATT;
   }

-  if (TargetType == isDarwin && Is64Bit)
-    HasLow4GUserAddress = false;
-
   if (TargetType == isDarwin ||
       TargetType == isCygwin ||
       TargetType == isMingw ||


@@ -82,10 +82,6 @@ private:
   /// pointer size is 64 bit.
   bool Is64Bit;
-
-  /// HasLow4GUserAddress - True if the low 4G user-space address is available.
-  ///
-  bool HasLow4GUserAddress;

 public:
   enum {
     isELF, isCygwin, isDarwin, isWindows, isMingw
@@ -115,10 +111,6 @@ public:
   bool is64Bit() const { return Is64Bit; }

-  /// hasLow4GUserSpaceAddress - True if lower 4G user-space address is
-  /// available.
-  bool hasLow4GUserSpaceAddress() const { return HasLow4GUserAddress; }
-
   PICStyle::Style getPICStyle() const { return PICStyle; }
   void setPICStyle(PICStyle::Style Style) { PICStyle = Style; }


@@ -1,36 +1,35 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -relocation-model=pic -mtriple=i386-linux-gnu | not grep -F .text
-; RUN: llvm-upgrade < %s | llvm-as | llc -relocation-model=pic -mtriple=i686-apple-darwin | not grep lea
+; RUN: llvm-as < %s | llc -relocation-model=pic -mtriple=i386-linux-gnu | not grep -F .text
+; RUN: llvm-as < %s | llc -relocation-model=pic -mtriple=i686-apple-darwin | not grep lea
+; RUN: llvm-as < %s | llc -relocation-model=pic -mtriple=i686-apple-darwin | grep add | count 2

-implementation ; Functions:
-
-declare void %_Z3bari( int )
-
-linkonce void %_Z3fooILi1EEvi(int %Y) {
+declare void @_Z3bari(i32)
+
+define linkonce void @_Z3fooILi1EEvi(i32 %Y) {
 entry:
-  %Y_addr = alloca int  ; <int*> [#uses=2]
-  "alloca point" = cast int 0 to int  ; <int> [#uses=0]
-  store int %Y, int* %Y_addr
-  %tmp = load int* %Y_addr  ; <int> [#uses=1]
-  switch int %tmp, label %bb10 [
-      int 0, label %bb3
-      int 1, label %bb
-      int 2, label %bb
-      int 3, label %bb
-      int 4, label %bb
-      int 5, label %bb
-      int 6, label %bb
-      int 7, label %bb
-      int 8, label %bb
-      int 9, label %bb
-      int 10, label %bb
-      int 12, label %bb1
-      int 13, label %bb5
-      int 14, label %bb6
-      int 16, label %bb2
-      int 17, label %bb4
-      int 23, label %bb8
-      int 27, label %bb7
-      int 34, label %bb9
+  %Y_addr = alloca i32  ; <i32*> [#uses=2]
+  %"alloca point" = bitcast i32 0 to i32  ; <i32> [#uses=0]
+  store i32 %Y, i32* %Y_addr
+  %tmp = load i32* %Y_addr  ; <i32> [#uses=1]
+  switch i32 %tmp, label %bb10 [
+      i32 0, label %bb3
+      i32 1, label %bb
+      i32 2, label %bb
+      i32 3, label %bb
+      i32 4, label %bb
+      i32 5, label %bb
+      i32 6, label %bb
+      i32 7, label %bb
+      i32 8, label %bb
+      i32 9, label %bb
+      i32 10, label %bb
+      i32 12, label %bb1
+      i32 13, label %bb5
+      i32 14, label %bb6
+      i32 16, label %bb2
+      i32 17, label %bb4
+      i32 23, label %bb8
+      i32 27, label %bb7
+      i32 34, label %bb9
   ]

 bb: ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry
@@ -40,7 +39,7 @@ bb1: ; preds = %bb, %entry
   br label %bb2

 bb2: ; preds = %bb1, %entry
-  call void %_Z3bari( int 1 )
+  call void @_Z3bari( i32 1 )
   br label %bb11

 bb3: ; preds = %entry
@@ -53,7 +52,7 @@ bb5: ; preds = %bb4, %entry
   br label %bb6

 bb6: ; preds = %bb5, %entry
-  call void %_Z3bari( int 2 )
+  call void @_Z3bari( i32 2 )
   br label %bb11

 bb7: ; preds = %entry
@@ -63,7 +62,7 @@ bb8: ; preds = %bb7, %entry
   br label %bb9

 bb9: ; preds = %bb8, %entry
-  call void %_Z3bari( int 3 )
+  call void @_Z3bari( i32 3 )
   br label %bb11

 bb10: ; preds = %entry


@@ -0,0 +1,14 @@
+; RUN: llvm-as < %s | llc -mtriple=x86_64-apple-darwin | not grep lea
+
+%struct.x = type { float, double }
+@X = global %struct.x { float 1.000000e+00, double 2.000000e+00 }, align 16  ; <%struct.x*> [#uses=2]
+
+define i32 @main() nounwind {
+entry:
+  %tmp2 = load float* getelementptr (%struct.x* @X, i32 0, i32 0), align 16  ; <float> [#uses=1]
+  %tmp4 = load double* getelementptr (%struct.x* @X, i32 0, i32 1), align 8  ; <double> [#uses=1]
+  tail call void @t( float %tmp2, double %tmp4 ) nounwind
+  ret i32 0
+}
+
+declare void @t(float, double)