llvm-6502/test/CodeGen/X86/store-narrow.ll
Chris Lattner 6dc868581b teach codegen to turn trunc(zextload) into load when possible.
This doesn't occur much at all; it only seems to be formed when the
trunc optimization kicks in due to phase ordering. In that case it
saves a few bytes on x86-32.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@101350 91177308-0d34-0410-b5e6-96231b3b80d8
2010-04-15 05:40:59 +00:00
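As a rough illustration (a hedged sketch, not taken from this commit's tests; the function and value names here are made up), the pattern in question looks like this at the IR level — the load+zext pair becomes a zextload node during selection, and the trunc of it can reuse the narrow load directly:

define i8 @trunc_zextload_sketch(i8* %p, i32* %q) nounwind {
entry:
  %a = load i8* %p        ; load + zext select as a single zextload node
  %b = zext i8 %a to i32
  store i32 %b, i32* %q   ; second use keeps the wide value alive
  %c = trunc i32 %b to i8 ; trunc(zextload) folds back to the i8 load
  ret i8 %c
}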

; rdar://7860110
; RUN: llc < %s | FileCheck %s -check-prefix=X64
; RUN: llc -march=x86 < %s | FileCheck %s -check-prefix=X32
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.2"
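
; Replace the low byte of the i32 at %a0 with %a1; the and/zext/or/store
; sequence should narrow to a single one-byte store.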
define void @test1(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -256 ; 0xFFFFFF00
  %C = zext i8 %a1 to i32
  %D = or i32 %C, %B
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test1:
; X64: movb %sil, (%rdi)
; X32: test1:
; X32: movb 8(%esp), %al
; X32: movb %al, (%{{.*}})
}
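
; Same, but byte 1 (the value is shifted left by 8); expects a movb at offset 1.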
define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65281 ; 0xFFFF00FF
  %C = zext i8 %a1 to i32
  %CS = shl i32 %C, 8
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test2:
; X64: movb %sil, 1(%rdi)
; X32: test2:
; X32: movb 8(%esp), %al
; X32: movb %al, 1(%{{.*}})
}
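
; Replace the low 16 bits; expects a single movw.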
define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, -65536 ; 0xFFFF0000
  %C = zext i16 %a1 to i32
  %D = or i32 %B, %C
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test3:
; X64: movw %si, (%rdi)
; X32: test3:
; X32: movw 8(%esp), %ax
; X32: movw %ax, (%{{.*}})
}
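
; Replace the high 16 bits (the value is shifted left by 16); expects a movw at offset 2.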
define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i32* %a0, align 4
  %B = and i32 %A, 65535 ; 0x0000FFFF
  %C = zext i16 %a1 to i32
  %CS = shl i32 %C, 16
  %D = or i32 %B, %CS
  store i32 %D, i32* %a0, align 4
  ret void
; X64: test4:
; X64: movw %si, 2(%rdi)
; X32: test4:
; X32: movw 8(%esp), %ax
; X32: movw %ax, 2(%{{.*}})
}
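
; i64 version: replace bits 16-31; still expects a movw at offset 2.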
define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -4294901761 ; 0xFFFFFFFF0000FFFF
  %C = zext i16 %a1 to i64
  %CS = shl i64 %C, 16
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void
; X64: test5:
; X64: movw %si, 2(%rdi)
; X32: test5:
; X32: movw 8(%esp), %ax
; X32: movw %ax, 2(%{{.*}})
}
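
; i64 version: replace bits 40-47 (byte 5); expects a movb at offset 5.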
define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
entry:
  %A = load i64* %a0, align 4
  %B = and i64 %A, -280375465082881 ; 0xFFFF00FFFFFFFFFF
  %C = zext i8 %a1 to i64
  %CS = shl i64 %C, 40
  %D = or i64 %B, %CS
  store i64 %D, i64* %a0, align 4
  ret void
; X64: test6:
; X64: movb %sil, 5(%rdi)
; X32: test6:
; X32: movb 8(%esp), %al
; X32: movb %al, 5(%{{.*}})
}