mirror of
https://github.com/c64scene-ar/llvm-6502.git
synced 2024-12-26 05:32:25 +00:00
cf9661c6f9
Remove a block of code from LowerSIGN_EXTEND_INREG() that was added with: http://llvm.org/viewvc/llvm-project?view=revision&revision=177421 And caused: http://llvm.org/bugs/show_bug.cgi?id=20472 (more analysis here) http://llvm.org/bugs/show_bug.cgi?id=18054 The testcases confirm that we (1) don't remove a zext op that is necessary and (2) generate a pmovz instead of punpck if SSE4.1 is available. Although pmovz is 1 byte longer, it allows folding of the load, and so saves 3 bytes overall. Differential Revision: http://reviews.llvm.org/D4909 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@216679 91177308-0d34-0410-b5e6-96231b3b80d8
31 lines
1.1 KiB
LLVM
; RUN: llc %s -mtriple=x86_64-unknown-unknown -mattr='-sse4.1' -o - | FileCheck %s -check-prefix=NO_SSE_41
; RUN: llc %s -mtriple=x86_64-unknown-unknown -mattr='+sse4.1' -o - | FileCheck %s -check-prefix=SSE_41
; PR20472 ( http://llvm.org/bugs/show_bug.cgi?id=20472 )
; When sexting a trunc'd vector value, we can't eliminate the zext.

; If we don't have SSE4.1, use punpck.
; If we have SSE4.1, use pmovzx because it combines the load op.

; There may be a better way to do this using pshufb + pmovsx,
; but that is beyond our current codegen capabilities.
; Load 4 x i16, truncate to 4 x i8, then sign-extend to 4 x i32.
; The truncate must not be folded away: only the low 8 bits of each
; lane are sign-extended, hence the pslld/psrad $24 pair in both
; expected code sequences below.
define <4 x i32> @trunc_sext(<4 x i16>* %in) {
  %load = load <4 x i16>* %in
  %trunc = trunc <4 x i16> %load to <4 x i8>
  %sext = sext <4 x i8> %trunc to <4 x i32>
  ret <4 x i32> %sext

; Without SSE4.1: 64-bit load, widen i16 -> i32 with punpcklwd,
; then shift left/right arithmetic by 24 to sign-extend from bit 7.
; NO_SSE_41-LABEL: trunc_sext:
; NO_SSE_41: movq (%rdi), %xmm0
; NO_SSE_41-NEXT: punpcklwd %xmm0, %xmm0
; NO_SSE_41-NEXT: pslld $24, %xmm0
; NO_SSE_41-NEXT: psrad $24, %xmm0
; NO_SSE_41-NEXT: retq

; With SSE4.1: pmovzxwd folds the load into the widening op
; (1 byte longer than punpck, but saves 3 bytes overall).
; SSE_41-LABEL: trunc_sext:
; SSE_41: pmovzxwd (%rdi), %xmm0
; SSE_41-NEXT: pslld $24, %xmm0
; SSE_41-NEXT: psrad $24, %xmm0
; SSE_41-NEXT: retq
}