mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-15 20:29:48 +00:00
X86: Don't fold spills into SSE operations if the stack is unaligned.
Regalloc can emit unaligned spills nowadays, but we can't fold the spills into SSE ops if we can't guarantee alignment. PR12250. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192064 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
714319a169
commit
d9f7a185e3
@@ -4166,6 +4166,10 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
|
||||
const MachineFrameInfo *MFI = MF.getFrameInfo();
|
||||
unsigned Size = MFI->getObjectSize(FrameIndex);
|
||||
unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
|
||||
// If the function stack isn't realigned we don't want to fold instructions
|
||||
// that need increased alignment.
|
||||
if (!RI.needsStackRealignment(MF))
|
||||
Alignment = std::min(Alignment, TM.getFrameLowering()->getStackAlignment());
|
||||
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
|
||||
unsigned NewOpc = 0;
|
||||
unsigned RCSize = 0;
|
||||
|
49
test/CodeGen/X86/unaligned-spill-folding.ll
Normal file
49
test/CodeGen/X86/unaligned-spill-folding.ll
Normal file
@@ -0,0 +1,49 @@
|
||||
; Check spill folding under three stack-alignment configurations (see PR12250 below).
; RUN: llc -mtriple=i386-unknown-freebsd -mattr=sse2 -stack-alignment=4 -relocation-model=pic < %s | FileCheck %s -check-prefix=UNALIGNED
; RUN: llc -mtriple=i386-unknown-freebsd -mattr=sse2 -stack-alignment=16 -relocation-model=pic < %s | FileCheck %s -check-prefix=ALIGNED
; RUN: llc -mtriple=i386-unknown-freebsd -mattr=sse2 -stack-alignment=4 -force-align-stack -relocation-model=pic < %s | FileCheck %s -check-prefix=FORCEALIGNED

@arr = internal unnamed_addr global [32 x i32] zeroinitializer, align 16

; PR12250
define i32 @test1() {
vector.ph:
  br label %vector.body

vector.body:
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds [32 x i32]* @arr, i32 0, i32 %index
  %1 = bitcast i32* %0 to <4 x i32>*
  %wide.load = load <4 x i32>* %1, align 16
  %2 = add nsw <4 x i32> %wide.load, <i32 10, i32 10, i32 10, i32 10>
  %3 = xor <4 x i32> %2, <i32 123345, i32 123345, i32 123345, i32 123345>
  %4 = add nsw <4 x i32> %3, <i32 112, i32 112, i32 112, i32 112>
  %5 = xor <4 x i32> %4, <i32 543345, i32 543345, i32 543345, i32 543345>
  %6 = add nsw <4 x i32> %5, <i32 73, i32 73, i32 73, i32 73>
  %7 = xor <4 x i32> %6, <i32 345987, i32 345987, i32 345987, i32 345987>
  %8 = add nsw <4 x i32> %7, <i32 48, i32 48, i32 48, i32 48>
  %9 = xor <4 x i32> %8, <i32 123987, i32 123987, i32 123987, i32 123987>
  store <4 x i32> %9, <4 x i32>* %1, align 16
  %index.next = add i32 %index, 4
  %10 = icmp eq i32 %index.next, 32
  br i1 %10, label %middle.block, label %vector.body

middle.block:
  ret i32 0

; We can't fold the spill into a padd unless the stack is aligned. Just spilling
; doesn't force stack realignment though
; UNALIGNED-LABEL: @test1
; UNALIGNED-NOT: andl $-{{..}}, %esp
; UNALIGNED: movdqu {{.*}} # 16-byte Folded Spill
; UNALIGNED-NOT: paddd {{.*}} # 16-byte Folded Reload

; ALIGNED-LABEL: @test1
; ALIGNED-NOT: andl $-{{..}}, %esp
; ALIGNED: movdqa {{.*}} # 16-byte Spill
; ALIGNED: paddd {{.*}} # 16-byte Folded Reload

; FORCEALIGNED-LABEL: @test1
; FORCEALIGNED: andl $-{{..}}, %esp
; FORCEALIGNED: movdqa {{.*}} # 16-byte Spill
; FORCEALIGNED: paddd {{.*}} # 16-byte Folded Reload
}
|
Loading…
Reference in New Issue
Block a user