From 1a68958d3d9233bfe512de8ea7137067ef660345 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Wed, 25 Aug 2010 23:31:42 +0000
Subject: [PATCH] we should pattern match the SSE complex arithmetic ops.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@112109 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/README-SSE.txt | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/lib/Target/X86/README-SSE.txt b/lib/Target/X86/README-SSE.txt
index 85cdd177819..f96b22f1e20 100644
--- a/lib/Target/X86/README-SSE.txt
+++ b/lib/Target/X86/README-SSE.txt
@@ -17,6 +17,32 @@ __m128i shift_right(__m128i value, unsigned long offset) {
                _mm_loadu_si128((__m128 *) (___m128i_shift_right + offset)));
 }
 
+//===---------------------------------------------------------------------===//
+
+SSE has instructions for doing operations on complex numbers; we should pattern
+match them. Compiling this:
+
+_Complex float f32(_Complex float A, _Complex float B) {
+  return A+B;
+}
+
+into:
+
+_f32:
+	movdqa	%xmm0, %xmm2
+	addss	%xmm1, %xmm2
+	pshufd	$16, %xmm2, %xmm2
+	pshufd	$1, %xmm1, %xmm1
+	pshufd	$1, %xmm0, %xmm0
+	addss	%xmm1, %xmm0
+	pshufd	$16, %xmm0, %xmm1
+	movdqa	%xmm2, %xmm0
+	unpcklps	%xmm1, %xmm0
+	ret
+
+seems silly.
+
+
 //===---------------------------------------------------------------------===//
 
 Expand libm rounding functions inline: Significant speedups possible.
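
For comparison, complex addition is just an element-wise add of the real and
imaginary parts. Assuming the calling convention implied by the code above
(each _Complex float packed into the low 64 bits of an XMM register, real part
in the low lane), a minimal sketch of the hoped-for pattern-matched output is a
single packed add:

_f32:
	addps	%xmm1, %xmm0	# lane-wise: (A.re+B.re, A.im+B.im, junk, junk)
	ret

The upper two lanes hold garbage after the addps, but under that assumption
only the low 64 bits of %xmm0 carry the _Complex float return value, so
nothing more is needed.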