llvm-6502/test/CodeGen/X86/x86-64-double-shifts-var.ll
Ekaterina Romanova 46f7257ed1 SHLD/SHRD are VectorPath (microcoded) instructions known to have poor latency on certain architectures. While generating SHLD/SHRD instructions is acceptable when optimizing for size, optimizing for speed on these platforms should instead use alternative sequences composed of add, adc, shr, shl, or, and lea, which are DirectPath instructions. These alternatives not only have lower latency, they also increase decode bandwidth by allowing a third DirectPath instruction to be decoded simultaneously.
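
As an illustration (a hypothetical sequence, not taken from this patch): shifting a 128-bit value held in rdi:rsi left by one bit can be rewritten without SHLD by letting the carry flag move the crossing bit:

    # VectorPath form:
    shld $1, %rsi, %rdi    # hi = (hi << 1) | (lo >> 63)
    shlq $1, %rsi          # lo = lo << 1

    # DirectPath alternative:
    addq %rsi, %rsi        # lo = lo << 1, CF = old bit 63 of lo
    adcq %rdi, %rdi        # hi = (hi << 1) + CF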
AMD's K7, K8, K10, K12, K15, and K16 processor families are known to have SHLD/SHRD instructions with very poor latency, and the optimization guides for these processors recommend alternative instruction sequences. For these AMD processors, I disabled folding (x << c) | (y >> (64 - c)) into SHLD/SHRD when we are not optimizing for size.

It might be beneficial to disable this folding for some Intel processors as well. However, since I could not find specific recommendations regarding the use of SHLD/SHRD instructions on Intel processors, I have not disabled this peephole for Intel.
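
For the variable-count case exercised by the test below, the expected SHLD-free output looks roughly like the following (a minimal sketch assuming the SysV ABI, with a in rdi, b in rsi, c in edx):

    movq %rdi, %rax        # a
    movl %edx, %ecx
    shlq %cl, %rax         # a << c
    movl $64, %ecx
    subl %edx, %ecx        # ecx = 64 - c
    shrq %cl, %rsi         # b >> (64 - c)
    orq  %rsi, %rax        # (a << c) | (b >> (64 - c))
    retq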

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@195383 91177308-0d34-0410-b5e6-96231b3b80d8
2013-11-21 23:21:26 +00:00


; RUN: llc < %s -march=x86-64 -mcpu=athlon | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=athlon-tbird | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=athlon-4 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=athlon-xp | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=athlon-mp | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=k8 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=opteron | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=athlon64 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=athlon-fx | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=k8-sse3 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=opteron-sse3 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=athlon64-sse3 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=amdfam10 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=btver1 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=btver2 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=bdver1 | FileCheck %s
; RUN: llc < %s -march=x86-64 -mcpu=bdver2 | FileCheck %s
; Verify that we do not generate 'shld' or 'shrd' instructions for the
; x86-64 processors known to have poor-latency double-precision shift
; instructions.
;uint64_t lshift(uint64_t a, uint64_t b, int c)
;{
; return (a << c) | (b >> (64-c));
;}
define i64 @lshift(i64 %a, i64 %b, i32 %c) nounwind readnone {
entry:
; CHECK-NOT: shld
%sh_prom = zext i32 %c to i64
%shl = shl i64 %a, %sh_prom
%sub = sub nsw i32 64, %c
%sh_prom1 = zext i32 %sub to i64
%shr = lshr i64 %b, %sh_prom1
%or = or i64 %shr, %shl
ret i64 %or
}
;uint64_t rshift(uint64_t a, uint64_t b, int c)
;{
; return (a >> c) | (b << (64-c));
;}
define i64 @rshift(i64 %a, i64 %b, i32 %c) nounwind readnone {
entry:
; CHECK-NOT: shrd
%sh_prom = zext i32 %c to i64
%shr = lshr i64 %a, %sh_prom
%sub = sub nsw i32 64, %c
%sh_prom1 = zext i32 %sub to i64
%shl = shl i64 %b, %sh_prom1
%or = or i64 %shl, %shr
ret i64 %or
}